diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -48,6 +48,7 @@
   bool SelectFrameAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset);
   bool SelectBaseAddr(SDValue Addr, SDValue &Base);
   bool SelectAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset);
+  bool SelectAddrReg(SDValue Addr, SDValue &Base);
 
   bool selectShiftMask(SDValue N, unsigned ShiftWidth, SDValue &ShAmt);
   bool selectShiftMaskXLen(SDValue N, SDValue &ShAmt) {
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -287,9 +287,7 @@
   SDValue Chain = Node->getOperand(0);
   SDValue Glue;
 
-  SDValue Base;
-  SelectBaseAddr(Node->getOperand(CurOp++), Base);
-  Operands.push_back(Base); // Base pointer.
+  Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.
 
   if (IsStridedOrIndexed) {
     Operands.push_back(Node->getOperand(CurOp++)); // Index.
@@ -1825,10 +1823,11 @@
 bool RISCVDAGToDAGISel::SelectBaseAddr(SDValue Addr, SDValue &Base) {
   // If this is FrameIndex, select it directly. Otherwise just let it get
   // selected to a register independently.
-  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr))
+  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
-  else
+  } else {
     Base = Addr;
+  }
   return true;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -373,7 +373,6 @@
 // Necessary because a frameindex can't be matched directly in a pattern.
 def FrameAddrRegImm : ComplexPattern<iPTR, 2, "SelectFrameAddrRegImm",
                                      [frameindex, or, add]>;
-def BaseAddr : ComplexPattern<iPTR, 1, "SelectBaseAddr">;
 def AddrRegImm : ComplexPattern<iPTR, 2, "SelectAddrRegImm">;
 
 // Return the negation of an immediate value.
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -34,11 +34,11 @@
   defvar load_instr = !cast<Instruction>("PseudoVLE"#sew#"_V_"#vlmul.MX);
   defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX);
   // Load
-  def : Pat<(type (load BaseAddr:$rs1)),
-            (load_instr BaseAddr:$rs1, avl, log2sew)>;
+  def : Pat<(type (load GPR:$rs1)),
+            (load_instr GPR:$rs1, avl, log2sew)>;
   // Store
-  def : Pat<(store type:$rs2, BaseAddr:$rs1),
-            (store_instr reg_class:$rs2, BaseAddr:$rs1, avl, log2sew)>;
+  def : Pat<(store type:$rs2, GPR:$rs1),
+            (store_instr reg_class:$rs2, GPR:$rs1, avl, log2sew)>;
 }
 multiclass VPatUSLoadStoreWholeVRSDNode
   defvar store_instr = !cast<Instruction>("VS"#!substr(vlmul.MX, 1)#"R_V");
   // Load
-  def : Pat<(type (load BaseAddr:$rs1)),
-            (load_instr BaseAddr:$rs1)>;
+  def : Pat<(type (load GPR:$rs1)),
+            (load_instr GPR:$rs1)>;
   // Store
-  def : Pat<(store type:$rs2, BaseAddr:$rs1),
-            (store_instr reg_class:$rs2, BaseAddr:$rs1)>;
+  def : Pat<(store type:$rs2, GPR:$rs1),
+            (store_instr reg_class:$rs2, GPR:$rs1)>;
 }
 multiclass VPatUSLoadStoreMaskSDNode
@@ -65,11 +65,11 @@
   defvar load_instr = !cast<Instruction>("PseudoVLM_V_"#m.BX);
   defvar store_instr = !cast<Instruction>("PseudoVSM_V_"#m.BX);
   // Load
-  def : Pat<(m.Mask (load BaseAddr:$rs1)),
-            (load_instr BaseAddr:$rs1, m.AVL, m.Log2SEW)>;
+  def : Pat<(m.Mask (load GPR:$rs1)),
+            (load_instr GPR:$rs1, m.AVL, m.Log2SEW)>;
   // Store
-  def : Pat<(store m.Mask:$rs2, BaseAddr:$rs1),
-            (store_instr VR:$rs2, BaseAddr:$rs1, m.AVL, m.Log2SEW)>;
+  def : Pat<(store m.Mask:$rs2, GPR:$rs1),
+            (store_instr VR:$rs2, GPR:$rs1, m.AVL, m.Log2SEW)>;
 }
 class VPatBinarySDNode_VV
   defvar store_instr = !cast<Instruction>("PseudoVSE"#vti.SEW#"_V_"#vti.LMul.MX);
-  def : Pat<(store (vti.Scalar (int_riscv_vfmv_f_s (vti.Vector vti.RegClass:$rs2))), BaseAddr:$rs1),
-            (store_instr vti.RegClass:$rs2, BaseAddr:$rs1, 1, vti.Log2SEW)>;
-  def : Pat<(store (extractelt (vti.Vector vti.RegClass:$rs2), 0), BaseAddr:$rs1),
-            (store_instr vti.RegClass:$rs2, BaseAddr:$rs1, 1, vti.Log2SEW)>;
+
+  let AddedComplexity = 1 in {
+  // Add complexity to increase the priority of this pattern being matched.
+ def : Pat<(store (vti.Scalar (int_riscv_vfmv_f_s (vti.Vector vti.RegClass:$rs2))), GPR:$rs1), + (store_instr vti.RegClass:$rs2, GPR:$rs1, 1, vti.Log2SEW)>; + def : Pat<(store (extractelt (vti.Vector vti.RegClass:$rs2), 0), GPR:$rs1), + (store_instr vti.RegClass:$rs2, GPR:$rs1, 1, vti.Log2SEW)>; + } defvar vmv_f_s_inst = !cast(!strconcat("PseudoVFMV_", vti.ScalarSuffix, diff --git a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll --- a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll @@ -31,22 +31,22 @@ define @access_fixed_and_vector_objects(i64 *%val) { ; RV64IV-LABEL: access_fixed_and_vector_objects: ; RV64IV: # %bb.0: -; RV64IV-NEXT: addi sp, sp, -544 -; RV64IV-NEXT: .cfi_def_cfa_offset 544 +; RV64IV-NEXT: addi sp, sp, -528 +; RV64IV-NEXT: .cfi_def_cfa_offset 528 ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 1 ; RV64IV-NEXT: sub sp, sp, a0 -; RV64IV-NEXT: addi a0, sp, 24 +; RV64IV-NEXT: addi a0, sp, 8 ; RV64IV-NEXT: vl1re64.v v8, (a0) -; RV64IV-NEXT: ld a0, 536(sp) -; RV64IV-NEXT: addi a1, sp, 544 -; RV64IV-NEXT: vl1re64.v v9, (a1) -; RV64IV-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV64IV-NEXT: addi a0, sp, 528 +; RV64IV-NEXT: ld a1, 520(sp) +; RV64IV-NEXT: vl1re64.v v9, (a0) +; RV64IV-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64IV-NEXT: vadd.vv v8, v8, v9 ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 1 ; RV64IV-NEXT: add sp, sp, a0 -; RV64IV-NEXT: addi sp, sp, 544 +; RV64IV-NEXT: addi sp, sp, 528 ; RV64IV-NEXT: ret %local = alloca i64 %vector = alloca diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll @@ -1016,38 +1016,32 @@ ; RV32-NEXT: vsrl.vi v12, v8, 8 ; RV32-NEXT: vand.vv v10, v12, v10 ; RV32-NEXT: vor.vv v10, v10, v11 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v11, (a0), zero +; RV32-NEXT: vlse64.v v11, (a1), zero ; RV32-NEXT: vor.vv v9, v10, v9 ; RV32-NEXT: vsll.vx v10, v8, a2 ; RV32-NEXT: vsll.vx v12, v8, a3 ; RV32-NEXT: vand.vv v11, v12, v11 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vlse64.v v12, (a1), zero ; RV32-NEXT: vor.vv v10, v10, v11 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v11, (a0), zero +; RV32-NEXT: vlse64.v v11, (a1), zero ; RV32-NEXT: vsll.vi v13, v8, 8 ; RV32-NEXT: vand.vv v12, v13, v12 ; RV32-NEXT: vsll.vi v8, v8, 24 ; RV32-NEXT: vand.vv v8, v8, v11 ; RV32-NEXT: vor.vv v8, v8, v12 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v11, (a0), zero +; RV32-NEXT: vlse64.v v11, (a1), zero ; RV32-NEXT: vor.vv v8, v10, v8 ; RV32-NEXT: vor.vv v8, v8, v9 ; RV32-NEXT: vsrl.vi v9, v8, 4 ; RV32-NEXT: vand.vv v9, v9, v11 ; RV32-NEXT: vand.vv v8, v8, v11 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vlse64.v v10, (a1), zero ; RV32-NEXT: vsll.vi v8, v8, 4 ; RV32-NEXT: vor.vv v8, v9, v8 ; RV32-NEXT: vsrl.vi v9, v8, 2 ; RV32-NEXT: vand.vv v9, v9, v10 ; RV32-NEXT: vand.vv v8, v8, v10 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vlse64.v v10, (a1), zero ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vor.vv v8, v9, v8 ; RV32-NEXT: vsrl.vi v9, v8, 1 @@ -1161,38 +1155,32 @@ ; RV32-NEXT: vsrl.vi v16, v8, 8 ; RV32-NEXT: vand.vv v12, v16, v12 ; RV32-NEXT: vor.vv v12, v12, v14 -; RV32-NEXT: addi a0, sp, 8 -; 
RV32-NEXT: vlse64.v v14, (a0), zero +; RV32-NEXT: vlse64.v v14, (a1), zero ; RV32-NEXT: vor.vv v10, v12, v10 ; RV32-NEXT: vsll.vx v12, v8, a2 ; RV32-NEXT: vsll.vx v16, v8, a3 ; RV32-NEXT: vand.vv v14, v16, v14 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vlse64.v v16, (a1), zero ; RV32-NEXT: vor.vv v12, v12, v14 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v14, (a0), zero +; RV32-NEXT: vlse64.v v14, (a1), zero ; RV32-NEXT: vsll.vi v18, v8, 8 ; RV32-NEXT: vand.vv v16, v18, v16 ; RV32-NEXT: vsll.vi v8, v8, 24 ; RV32-NEXT: vand.vv v8, v8, v14 ; RV32-NEXT: vor.vv v8, v8, v16 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v14, (a0), zero +; RV32-NEXT: vlse64.v v14, (a1), zero ; RV32-NEXT: vor.vv v8, v12, v8 ; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: vsrl.vi v10, v8, 4 ; RV32-NEXT: vand.vv v10, v10, v14 ; RV32-NEXT: vand.vv v8, v8, v14 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vlse64.v v12, (a1), zero ; RV32-NEXT: vsll.vi v8, v8, 4 ; RV32-NEXT: vor.vv v8, v10, v8 ; RV32-NEXT: vsrl.vi v10, v8, 2 ; RV32-NEXT: vand.vv v10, v10, v12 ; RV32-NEXT: vand.vv v8, v8, v12 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vlse64.v v12, (a1), zero ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vor.vv v8, v10, v8 ; RV32-NEXT: vsrl.vi v10, v8, 1 @@ -1306,38 +1294,32 @@ ; RV32-NEXT: vsrl.vi v24, v8, 8 ; RV32-NEXT: vand.vv v16, v24, v16 ; RV32-NEXT: vor.vv v16, v16, v20 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v20, (a0), zero +; RV32-NEXT: vlse64.v v20, (a1), zero ; RV32-NEXT: vor.vv v12, v16, v12 ; RV32-NEXT: vsll.vx v16, v8, a2 ; RV32-NEXT: vsll.vx v24, v8, a3 ; RV32-NEXT: vand.vv v20, v24, v20 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vlse64.v v24, (a1), zero ; RV32-NEXT: vor.vv v16, v16, v20 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v20, (a0), zero +; RV32-NEXT: vlse64.v v20, (a1), zero ; RV32-NEXT: vsll.vi v28, v8, 8 ; RV32-NEXT: vand.vv v24, v28, v24 ; RV32-NEXT: vsll.vi v8, v8, 24 ; RV32-NEXT: vand.vv v8, v8, v20 ; RV32-NEXT: vor.vv v8, v8, v24 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v20, (a0), zero +; RV32-NEXT: vlse64.v v20, (a1), zero ; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: vor.vv v8, v8, v12 ; RV32-NEXT: vsrl.vi v12, v8, 4 ; RV32-NEXT: vand.vv v12, v12, v20 ; RV32-NEXT: vand.vv v8, v8, v20 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vlse64.v v16, (a1), zero ; RV32-NEXT: vsll.vi v8, v8, 4 ; RV32-NEXT: vor.vv v8, v12, v8 ; RV32-NEXT: vsrl.vi v12, v8, 2 ; RV32-NEXT: vand.vv v12, v12, v16 ; RV32-NEXT: vand.vv v8, v8, v16 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vlse64.v v16, (a1), zero ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vor.vv v8, v12, v8 ; RV32-NEXT: vsrl.vi v12, v8, 1 @@ -1442,24 +1424,23 @@ ; RV32-NEXT: sw a2, 8(sp) ; RV32-NEXT: li a2, 56 ; RV32-NEXT: vsetvli a3, zero, e64, m8, ta, mu -; RV32-NEXT: vsrl.vx v16, v8, a2 ; RV32-NEXT: li a3, 40 -; RV32-NEXT: vsrl.vx v24, v8, a3 -; RV32-NEXT: addi a4, sp, 8 -; RV32-NEXT: vlse64.v v0, (a4), zero -; RV32-NEXT: vand.vx v24, v24, a1 -; RV32-NEXT: vor.vv v16, v24, v16 -; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: slli a1, a1, 3 -; RV32-NEXT: add a1, sp, a1 -; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; RV32-NEXT: vsrl.vi v24, v8, 8 -; RV32-NEXT: vand.vv v24, v24, v0 +; RV32-NEXT: vsrl.vx v16, v8, a3 +; RV32-NEXT: vand.vx v16, v16, a1 +; 
RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsrl.vx v0, v8, a2 +; RV32-NEXT: vor.vv v16, v16, v0 +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 3 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsrl.vi v0, v8, 8 +; RV32-NEXT: vand.vv v24, v0, v24 ; RV32-NEXT: vsrl.vi v0, v8, 24 ; RV32-NEXT: vand.vx v0, v0, a0 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vlse64.v v16, (a1), zero ; RV32-NEXT: vor.vv v24, v24, v0 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 @@ -1475,20 +1456,17 @@ ; RV32-NEXT: vsll.vx v24, v8, a3 ; RV32-NEXT: vand.vv v16, v24, v16 ; RV32-NEXT: vsll.vx v24, v8, a2 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v0, (a0), zero +; RV32-NEXT: vlse64.v v0, (a1), zero ; RV32-NEXT: vor.vv v16, v24, v16 ; RV32-NEXT: addi a0, sp, 16 ; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vlse64.v v16, (a1), zero ; RV32-NEXT: vsll.vi v24, v8, 8 ; RV32-NEXT: vand.vv v24, v24, v0 ; RV32-NEXT: vsll.vi v8, v8, 24 ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: vor.vv v8, v8, v24 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vlse64.v v16, (a1), zero ; RV32-NEXT: addi a0, sp, 16 ; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v24, v8 @@ -1501,15 +1479,13 @@ ; RV32-NEXT: vsrl.vi v24, v8, 4 ; RV32-NEXT: vand.vv v24, v24, v16 ; RV32-NEXT: vand.vv v8, v8, v16 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vlse64.v v16, (a1), zero ; RV32-NEXT: vsll.vi v8, v8, 4 ; RV32-NEXT: vor.vv v8, v24, v8 ; RV32-NEXT: vsrl.vi v24, v8, 2 ; RV32-NEXT: vand.vv v24, v24, v16 ; RV32-NEXT: vand.vv v8, v8, v16 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vlse64.v v16, (a1), zero ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vor.vv v8, v24, v8 ; RV32-NEXT: vsrl.vi v24, v8, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll @@ -305,17 +305,14 @@ ; RV32-NEXT: vsrl.vi v12, v8, 8 ; RV32-NEXT: vand.vv v10, v12, v10 ; RV32-NEXT: vor.vv v10, v10, v11 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v11, (a0), zero +; RV32-NEXT: vlse64.v v11, (a1), zero ; RV32-NEXT: vor.vv v9, v10, v9 ; RV32-NEXT: vsll.vx v10, v8, a2 ; RV32-NEXT: vsll.vx v12, v8, a3 ; RV32-NEXT: vand.vv v11, v12, v11 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vlse64.v v12, (a1), zero ; RV32-NEXT: vor.vv v10, v10, v11 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v11, (a0), zero +; RV32-NEXT: vlse64.v v11, (a1), zero ; RV32-NEXT: vsll.vi v13, v8, 8 ; RV32-NEXT: vand.vv v12, v13, v12 ; RV32-NEXT: vsll.vi v8, v8, 24 @@ -396,17 +393,14 @@ ; RV32-NEXT: vsrl.vi v16, v8, 8 ; RV32-NEXT: vand.vv v12, v16, v12 ; RV32-NEXT: vor.vv v12, v12, v14 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v14, (a0), zero +; RV32-NEXT: vlse64.v v14, (a1), zero ; RV32-NEXT: vor.vv v10, v12, v10 ; RV32-NEXT: vsll.vx v12, v8, a2 ; RV32-NEXT: vsll.vx v16, v8, a3 ; RV32-NEXT: vand.vv v14, v16, v14 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vlse64.v v16, (a1), zero ; RV32-NEXT: vor.vv v12, v12, v14 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v 
v14, (a0), zero +; RV32-NEXT: vlse64.v v14, (a1), zero ; RV32-NEXT: vsll.vi v18, v8, 8 ; RV32-NEXT: vand.vv v16, v18, v16 ; RV32-NEXT: vsll.vi v8, v8, 24 @@ -487,17 +481,14 @@ ; RV32-NEXT: vsrl.vi v24, v8, 8 ; RV32-NEXT: vand.vv v16, v24, v16 ; RV32-NEXT: vor.vv v16, v16, v20 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v20, (a0), zero +; RV32-NEXT: vlse64.v v20, (a1), zero ; RV32-NEXT: vor.vv v12, v16, v12 ; RV32-NEXT: vsll.vx v16, v8, a2 ; RV32-NEXT: vsll.vx v24, v8, a3 ; RV32-NEXT: vand.vv v20, v24, v20 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vlse64.v v24, (a1), zero ; RV32-NEXT: vor.vv v16, v16, v20 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v20, (a0), zero +; RV32-NEXT: vlse64.v v20, (a1), zero ; RV32-NEXT: vsll.vi v28, v8, 8 ; RV32-NEXT: vand.vv v24, v28, v24 ; RV32-NEXT: vsll.vi v8, v8, 24 @@ -569,24 +560,23 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: li a2, 56 ; RV32-NEXT: vsetvli a3, zero, e64, m8, ta, mu -; RV32-NEXT: vsrl.vx v16, v8, a2 ; RV32-NEXT: li a3, 40 -; RV32-NEXT: vsrl.vx v24, v8, a3 -; RV32-NEXT: addi a4, sp, 8 -; RV32-NEXT: vlse64.v v0, (a4), zero -; RV32-NEXT: vand.vx v24, v24, a1 -; RV32-NEXT: vor.vv v16, v24, v16 -; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: slli a1, a1, 3 -; RV32-NEXT: add a1, sp, a1 -; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; RV32-NEXT: vsrl.vi v24, v8, 8 -; RV32-NEXT: vand.vv v24, v24, v0 +; RV32-NEXT: vsrl.vx v16, v8, a3 +; RV32-NEXT: vand.vx v16, v16, a1 +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsrl.vx v0, v8, a2 +; RV32-NEXT: vor.vv v16, v16, v0 +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 3 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsrl.vi v0, v8, 8 +; RV32-NEXT: vand.vv v24, v0, v24 ; RV32-NEXT: vsrl.vi v0, v8, 24 ; RV32-NEXT: vand.vx v0, v0, a0 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vlse64.v v16, (a1), zero ; RV32-NEXT: vor.vv v24, v24, v0 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 @@ -602,13 +592,11 @@ ; RV32-NEXT: vsll.vx v0, v8, a3 ; RV32-NEXT: vand.vv v16, v0, v16 ; RV32-NEXT: vsll.vx v0, v8, a2 -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vlse64.v v24, (a1), zero ; RV32-NEXT: vor.vv v16, v0, v16 ; RV32-NEXT: addi a0, sp, 16 ; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v0, (a0), zero +; RV32-NEXT: vlse64.v v0, (a1), zero ; RV32-NEXT: vsll.vi v16, v8, 8 ; RV32-NEXT: vand.vv v16, v16, v24 ; RV32-NEXT: vsll.vi v8, v8, 24 diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll @@ -302,12 +302,11 @@ ; RV32-NEXT: vl8re32.v v24, (a3) ; RV32-NEXT: vl8re32.v v0, (a0) ; RV32-NEXT: addi a0, sp, 128 -; RV32-NEXT: add a0, a0, a1 -; RV32-NEXT: vs8r.v v16, (a0) +; RV32-NEXT: vs8r.v v8, (a0) +; RV32-NEXT: add a1, a0, a1 ; RV32-NEXT: addi a0, sp, 128 ; RV32-NEXT: li a3, 2 -; RV32-NEXT: addi a1, sp, 128 -; RV32-NEXT: vs8r.v v8, (a1) +; RV32-NEXT: vs8r.v v16, (a1) ; RV32-NEXT: vmv8r.v v8, v0 ; RV32-NEXT: vmv8r.v v16, v24 ; RV32-NEXT: call ext2@plt @@ -334,12 +333,11 @@ ; RV64-NEXT: vl8re32.v v24, (a3) ; RV64-NEXT: vl8re32.v v0, (a0) ; RV64-NEXT: addi a0, sp, 128 -; RV64-NEXT: add a0, a0, 
a1 -; RV64-NEXT: vs8r.v v16, (a0) +; RV64-NEXT: vs8r.v v8, (a0) +; RV64-NEXT: add a1, a0, a1 ; RV64-NEXT: addi a0, sp, 128 ; RV64-NEXT: li a3, 2 -; RV64-NEXT: addi a1, sp, 128 -; RV64-NEXT: vs8r.v v8, (a1) +; RV64-NEXT: vs8r.v v16, (a1) ; RV64-NEXT: vmv8r.v v8, v0 ; RV64-NEXT: vmv8r.v v16, v24 ; RV64-NEXT: call ext2@plt @@ -376,43 +374,36 @@ ; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill ; RV32-NEXT: add a3, a0, a1 ; RV32-NEXT: vl8re32.v v24, (a3) +; RV32-NEXT: addi a3, sp, 128 +; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill ; RV32-NEXT: vl8re32.v v0, (a2) -; RV32-NEXT: addi a2, sp, 128 -; RV32-NEXT: vs8r.v v0, (a2) # Unknown-size Folded Spill -; RV32-NEXT: vl8re32.v v0, (a0) +; RV32-NEXT: vl8re32.v v24, (a0) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 128 +; RV32-NEXT: vs8r.v v8, (a0) +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: slli a2, a2, 5 +; RV32-NEXT: add a2, sp, a2 +; RV32-NEXT: addi a2, a2, 128 +; RV32-NEXT: vs8r.v v24, (a2) ; RV32-NEXT: add a0, a0, a1 ; RV32-NEXT: vs8r.v v16, (a0) +; RV32-NEXT: add a1, a2, a1 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 128 -; RV32-NEXT: add a0, a0, a1 -; RV32-NEXT: vs8r.v v24, (a0) -; RV32-NEXT: csrr a0, vlenb -; RV32-NEXT: slli a0, a0, 4 -; RV32-NEXT: add a0, sp, a0 -; RV32-NEXT: addi a0, a0, 128 -; RV32-NEXT: vs8r.v v8, (a0) -; RV32-NEXT: csrr a0, vlenb -; RV32-NEXT: slli a0, a0, 5 -; RV32-NEXT: add a0, sp, a0 -; RV32-NEXT: addi a0, a0, 128 -; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: slli a1, a1, 4 -; RV32-NEXT: add a1, sp, a1 -; RV32-NEXT: addi a2, a1, 128 +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: slli a2, a2, 4 +; RV32-NEXT: add a2, sp, a2 +; RV32-NEXT: addi a2, a2, 128 ; RV32-NEXT: li a5, 42 -; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: slli a1, a1, 5 -; RV32-NEXT: add a1, sp, a1 -; RV32-NEXT: addi a1, a1, 128 -; RV32-NEXT: vs8r.v v0, (a1) -; RV32-NEXT: addi a1, sp, 128 -; RV32-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; RV32-NEXT: addi a3, sp, 128 +; RV32-NEXT: vl8re8.v v8, (a3) # Unknown-size Folded Reload +; RV32-NEXT: vs8r.v v8, (a1) +; RV32-NEXT: vmv8r.v v8, v0 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: add a1, sp, a1 @@ -448,43 +439,36 @@ ; RV64-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill ; RV64-NEXT: add a3, a0, a1 ; RV64-NEXT: vl8re32.v v24, (a3) +; RV64-NEXT: addi a3, sp, 128 +; RV64-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill ; RV64-NEXT: vl8re32.v v0, (a2) -; RV64-NEXT: addi a2, sp, 128 -; RV64-NEXT: vs8r.v v0, (a2) # Unknown-size Folded Spill -; RV64-NEXT: vl8re32.v v0, (a0) +; RV64-NEXT: vl8re32.v v24, (a0) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 128 +; RV64-NEXT: vs8r.v v8, (a0) +; RV64-NEXT: csrr a2, vlenb +; RV64-NEXT: slli a2, a2, 5 +; RV64-NEXT: add a2, sp, a2 +; RV64-NEXT: addi a2, a2, 128 +; RV64-NEXT: vs8r.v v24, (a2) ; RV64-NEXT: add a0, a0, a1 ; RV64-NEXT: vs8r.v v16, (a0) +; RV64-NEXT: add a1, a2, a1 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 5 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 128 -; RV64-NEXT: add a0, a0, a1 -; RV64-NEXT: vs8r.v v24, (a0) -; RV64-NEXT: csrr a0, vlenb -; RV64-NEXT: slli a0, a0, 4 -; RV64-NEXT: add a0, sp, a0 -; RV64-NEXT: addi a0, a0, 128 -; RV64-NEXT: vs8r.v v8, (a0) -; RV64-NEXT: csrr a0, vlenb -; RV64-NEXT: slli a0, a0, 5 -; RV64-NEXT: add a0, sp, a0 -; RV64-NEXT: addi a0, a0, 128 -; RV64-NEXT: csrr 
a1, vlenb -; RV64-NEXT: slli a1, a1, 4 -; RV64-NEXT: add a1, sp, a1 -; RV64-NEXT: addi a2, a1, 128 +; RV64-NEXT: csrr a2, vlenb +; RV64-NEXT: slli a2, a2, 4 +; RV64-NEXT: add a2, sp, a2 +; RV64-NEXT: addi a2, a2, 128 ; RV64-NEXT: li a5, 42 -; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a1, a1, 5 -; RV64-NEXT: add a1, sp, a1 -; RV64-NEXT: addi a1, a1, 128 -; RV64-NEXT: vs8r.v v0, (a1) -; RV64-NEXT: addi a1, sp, 128 -; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; RV64-NEXT: addi a3, sp, 128 +; RV64-NEXT: vl8re8.v v8, (a3) # Unknown-size Folded Reload +; RV64-NEXT: vs8r.v v8, (a1) +; RV64-NEXT: vmv8r.v v8, v0 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a1, sp, a1 @@ -534,19 +518,18 @@ ; RV32-NEXT: andi sp, sp, -128 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV32-NEXT: vmv.v.i v8, 0 ; RV32-NEXT: addi a1, sp, 128 +; RV32-NEXT: vs8r.v v8, (a1) +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: slli a2, a2, 4 +; RV32-NEXT: add a2, sp, a2 +; RV32-NEXT: addi a2, a2, 128 +; RV32-NEXT: vs8r.v v8, (a2) ; RV32-NEXT: add a1, a1, a0 -; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, mu -; RV32-NEXT: vmv.v.i v8, 0 ; RV32-NEXT: vs8r.v v8, (a1) -; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: slli a1, a1, 4 -; RV32-NEXT: add a1, sp, a1 -; RV32-NEXT: addi a1, a1, 128 -; RV32-NEXT: add a0, a1, a0 -; RV32-NEXT: vs8r.v v8, (a0) -; RV32-NEXT: addi a0, sp, 128 -; RV32-NEXT: vs8r.v v8, (a0) +; RV32-NEXT: add a0, a2, a0 ; RV32-NEXT: li a1, 1 ; RV32-NEXT: li a2, 2 ; RV32-NEXT: li a3, 3 @@ -554,16 +537,12 @@ ; RV32-NEXT: li a5, 5 ; RV32-NEXT: li a6, 6 ; RV32-NEXT: li a7, 7 -; RV32-NEXT: csrr a0, vlenb -; RV32-NEXT: slli a0, a0, 4 -; RV32-NEXT: add a0, sp, a0 -; RV32-NEXT: addi t2, a0, 128 +; RV32-NEXT: csrr t0, vlenb +; RV32-NEXT: slli t0, t0, 4 +; RV32-NEXT: add t0, sp, t0 +; RV32-NEXT: addi t2, t0, 128 ; RV32-NEXT: addi t4, sp, 128 ; RV32-NEXT: li t6, 8 -; RV32-NEXT: csrr a0, vlenb -; RV32-NEXT: slli a0, a0, 4 -; RV32-NEXT: add a0, sp, a0 -; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vs8r.v v8, (a0) ; RV32-NEXT: li a0, 0 ; RV32-NEXT: vmv.v.i v16, 0 @@ -587,19 +566,18 @@ ; RV64-NEXT: andi sp, sp, -128 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV64-NEXT: vmv.v.i v8, 0 ; RV64-NEXT: addi a1, sp, 128 +; RV64-NEXT: vs8r.v v8, (a1) +; RV64-NEXT: csrr a2, vlenb +; RV64-NEXT: slli a2, a2, 4 +; RV64-NEXT: add a2, sp, a2 +; RV64-NEXT: addi a2, a2, 128 +; RV64-NEXT: vs8r.v v8, (a2) ; RV64-NEXT: add a1, a1, a0 -; RV64-NEXT: vsetvli a2, zero, e32, m8, ta, mu -; RV64-NEXT: vmv.v.i v8, 0 ; RV64-NEXT: vs8r.v v8, (a1) -; RV64-NEXT: csrr a1, vlenb -; RV64-NEXT: slli a1, a1, 4 -; RV64-NEXT: add a1, sp, a1 -; RV64-NEXT: addi a1, a1, 128 -; RV64-NEXT: add a0, a1, a0 -; RV64-NEXT: vs8r.v v8, (a0) -; RV64-NEXT: addi a0, sp, 128 -; RV64-NEXT: vs8r.v v8, (a0) +; RV64-NEXT: add a0, a2, a0 ; RV64-NEXT: li a1, 1 ; RV64-NEXT: li a2, 2 ; RV64-NEXT: li a3, 3 @@ -607,16 +585,12 @@ ; RV64-NEXT: li a5, 5 ; RV64-NEXT: li a6, 6 ; RV64-NEXT: li a7, 7 -; RV64-NEXT: csrr a0, vlenb -; RV64-NEXT: slli a0, a0, 4 -; RV64-NEXT: add a0, sp, a0 -; RV64-NEXT: addi t2, a0, 128 +; RV64-NEXT: csrr t0, vlenb +; RV64-NEXT: slli t0, t0, 4 +; RV64-NEXT: add t0, sp, t0 +; RV64-NEXT: addi t2, t0, 128 ; RV64-NEXT: addi t4, sp, 128 ; RV64-NEXT: li t6, 8 -; RV64-NEXT: csrr a0, vlenb -; RV64-NEXT: slli a0, a0, 4 -; RV64-NEXT: add a0, sp, a0 -; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vs8r.v v8, (a0) ; RV64-NEXT: li a0, 0 ; 
RV64-NEXT: vmv.v.i v16, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll @@ -33,13 +33,12 @@ ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: sub sp, sp, a0 ; RV32-NEXT: andi sp, sp, -128 -; RV32-NEXT: csrr a0, vlenb -; RV32-NEXT: slli a0, a0, 3 -; RV32-NEXT: addi a1, sp, 128 -; RV32-NEXT: add a0, a1, a0 -; RV32-NEXT: vs8r.v v16, (a0) ; RV32-NEXT: addi a0, sp, 128 ; RV32-NEXT: vs8r.v v8, (a0) +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a1, a1, 3 +; RV32-NEXT: add a0, a0, a1 +; RV32-NEXT: vs8r.v v16, (a0) ; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; RV32-NEXT: vmv.v.i v8, 0 ; RV32-NEXT: addi a0, sp, 128 @@ -62,13 +61,12 @@ ; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: sub sp, sp, a0 ; RV64-NEXT: andi sp, sp, -128 -; RV64-NEXT: csrr a0, vlenb -; RV64-NEXT: slli a0, a0, 3 -; RV64-NEXT: addi a1, sp, 128 -; RV64-NEXT: add a0, a1, a0 -; RV64-NEXT: vs8r.v v16, (a0) ; RV64-NEXT: addi a0, sp, 128 ; RV64-NEXT: vs8r.v v8, (a0) +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 3 +; RV64-NEXT: add a0, a0, a1 +; RV64-NEXT: vs8r.v v16, (a0) ; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; RV64-NEXT: vmv.v.i v8, 0 ; RV64-NEXT: addi a0, sp, 128 diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll @@ -1255,7 +1255,6 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vnot.v v8, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsrl.vi v11, v8, 1 ; RV32-NEXT: vand.vv v9, v11, v9 @@ -1264,9 +1263,7 @@ ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v10 ; RV32-NEXT: vadd.vv v8, v9, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsrl.vi v11, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v11 @@ -1359,7 +1356,6 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vnot.v v8, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsrl.vi v14, v8, 1 ; RV32-NEXT: vand.vv v10, v14, v10 @@ -1368,9 +1364,7 @@ ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v12 ; RV32-NEXT: vadd.vv v8, v10, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsrl.vi v14, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v14 @@ -1463,7 +1457,6 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vnot.v v8, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsrl.vi v20, v8, 1 ; RV32-NEXT: vand.vv v12, v20, v12 @@ -1472,9 +1465,7 @@ ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: vadd.vv v8, v12, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsrl.vi v20, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v20 @@ -1567,7 +1558,6 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vnot.v v8, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsrl.vi v0, v8, 1 ; RV32-NEXT: vand.vv v16, v0, v16 @@ -1576,9 +1566,7 @@ ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v24 ; RV32-NEXT: vadd.vv 
v8, v16, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsrl.vi v0, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v0 @@ -2829,7 +2817,6 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vnot.v v8, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsrl.vi v11, v8, 1 ; RV32-NEXT: vand.vv v9, v11, v9 @@ -2838,9 +2825,7 @@ ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v10 ; RV32-NEXT: vadd.vv v8, v9, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsrl.vi v11, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v11 @@ -2932,7 +2917,6 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vnot.v v8, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsrl.vi v14, v8, 1 ; RV32-NEXT: vand.vv v10, v14, v10 @@ -2941,9 +2925,7 @@ ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v12 ; RV32-NEXT: vadd.vv v8, v10, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsrl.vi v14, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v14 @@ -3035,7 +3017,6 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vnot.v v8, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsrl.vi v20, v8, 1 ; RV32-NEXT: vand.vv v12, v20, v12 @@ -3044,9 +3025,7 @@ ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: vadd.vv v8, v12, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsrl.vi v20, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v20 @@ -3138,7 +3117,6 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vnot.v v8, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsrl.vi v0, v8, 1 ; RV32-NEXT: vand.vv v16, v0, v16 @@ -3147,9 +3125,7 @@ ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v24 ; RV32-NEXT: vadd.vv v8, v16, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsrl.vi v0, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v0 diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll @@ -773,7 +773,6 @@ ; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsrl.vi v11, v8, 1 ; RV32-NEXT: vand.vv v9, v11, v9 @@ -782,9 +781,7 @@ ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v10 ; RV32-NEXT: vadd.vv v8, v9, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsrl.vi v11, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v11 @@ -849,7 +846,6 @@ ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsrl.vi v14, v8, 1 ; RV32-NEXT: vand.vv v10, v14, v10 @@ -858,9 +854,7 @@ ; RV32-NEXT: 
vsrl.vi v8, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v12 ; RV32-NEXT: vadd.vv v8, v10, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsrl.vi v14, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v14 @@ -925,7 +919,6 @@ ; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsrl.vi v20, v8, 1 ; RV32-NEXT: vand.vv v12, v20, v12 @@ -934,9 +927,7 @@ ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: vadd.vv v8, v12, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsrl.vi v20, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v20 @@ -1001,7 +992,6 @@ ; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsrl.vi v0, v8, 1 ; RV32-NEXT: vand.vv v16, v0, v16 @@ -1010,9 +1000,7 @@ ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v24 ; RV32-NEXT: vadd.vv v8, v16, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsrl.vi v0, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v0 diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll @@ -1127,7 +1127,6 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v9 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsrl.vi v11, v8, 1 ; RV32-NEXT: vand.vv v10, v11, v10 @@ -1136,9 +1135,7 @@ ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v9 ; RV32-NEXT: vadd.vv v8, v10, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsrl.vi v11, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v11 @@ -1211,7 +1208,6 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v10 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsrl.vi v14, v8, 1 ; RV32-NEXT: vand.vv v12, v14, v12 @@ -1220,9 +1216,7 @@ ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v10 ; RV32-NEXT: vadd.vv v8, v12, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsrl.vi v14, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v14 @@ -1295,7 +1289,6 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v12 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsrl.vi v20, v8, 1 ; RV32-NEXT: vand.vv v16, v20, v16 @@ -1304,9 +1297,7 @@ ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v12 ; RV32-NEXT: vadd.vv v8, v16, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsrl.vi v20, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v20 @@ -1379,7 +1370,6 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v16 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; 
RV32-NEXT: vsrl.vi v0, v8, 1 ; RV32-NEXT: vand.vv v24, v0, v24 @@ -1388,9 +1378,7 @@ ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: vadd.vv v8, v24, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsrl.vi v0, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v0 @@ -2503,7 +2491,6 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v9 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsrl.vi v11, v8, 1 ; RV32-NEXT: vand.vv v10, v11, v10 @@ -2512,9 +2499,7 @@ ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v9 ; RV32-NEXT: vadd.vv v8, v10, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsrl.vi v11, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v11 @@ -2586,7 +2571,6 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v10 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsrl.vi v14, v8, 1 ; RV32-NEXT: vand.vv v12, v14, v12 @@ -2595,9 +2579,7 @@ ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v10 ; RV32-NEXT: vadd.vv v8, v12, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsrl.vi v14, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v14 @@ -2669,7 +2651,6 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v12 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsrl.vi v20, v8, 1 ; RV32-NEXT: vand.vv v16, v20, v16 @@ -2678,9 +2659,7 @@ ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v12 ; RV32-NEXT: vadd.vv v8, v16, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsrl.vi v20, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v20 @@ -2752,7 +2731,6 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v16 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsrl.vi v0, v8, 1 ; RV32-NEXT: vand.vv v24, v0, v24 @@ -2761,9 +2739,7 @@ ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: vadd.vv v8, v24, v8 -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsrl.vi v0, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll @@ -155,8 +155,8 @@ ; RV64ELEN32: # %bb.0: ; RV64ELEN32-NEXT: addi sp, sp, -16 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 16 -; RV64ELEN32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64ELEN32-NEXT: addi a0, sp, 8 +; RV64ELEN32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64ELEN32-NEXT: vse8.v v8, (a0) ; RV64ELEN32-NEXT: ld a0, 8(sp) ; RV64ELEN32-NEXT: addi sp, sp, 16 @@ -193,8 +193,8 @@ ; RV64ELEN32: # %bb.0: ; RV64ELEN32-NEXT: addi sp, sp, -16 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 16 -; RV64ELEN32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV64ELEN32-NEXT: addi a0, sp, 8 +; RV64ELEN32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV64ELEN32-NEXT: 
vse16.v v8, (a0) ; RV64ELEN32-NEXT: ld a0, 8(sp) ; RV64ELEN32-NEXT: addi sp, sp, 16 @@ -231,8 +231,8 @@ ; RV64ELEN32: # %bb.0: ; RV64ELEN32-NEXT: addi sp, sp, -16 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 16 -; RV64ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; RV64ELEN32-NEXT: addi a0, sp, 8 +; RV64ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; RV64ELEN32-NEXT: vse32.v v8, (a0) ; RV64ELEN32-NEXT: ld a0, 8(sp) ; RV64ELEN32-NEXT: addi sp, sp, 16 @@ -355,8 +355,8 @@ ; ELEN32: # %bb.0: ; ELEN32-NEXT: addi sp, sp, -16 ; ELEN32-NEXT: .cfi_def_cfa_offset 16 -; ELEN32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; ELEN32-NEXT: addi a0, sp, 8 +; ELEN32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; ELEN32-NEXT: vse8.v v8, (a0) ; ELEN32-NEXT: fld fa0, 8(sp) ; ELEN32-NEXT: addi sp, sp, 16 @@ -376,8 +376,8 @@ ; ELEN32: # %bb.0: ; ELEN32-NEXT: addi sp, sp, -16 ; ELEN32-NEXT: .cfi_def_cfa_offset 16 -; ELEN32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; ELEN32-NEXT: addi a0, sp, 8 +; ELEN32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; ELEN32-NEXT: vse16.v v8, (a0) ; ELEN32-NEXT: fld fa0, 8(sp) ; ELEN32-NEXT: addi sp, sp, 16 @@ -397,8 +397,8 @@ ; ELEN32: # %bb.0: ; ELEN32-NEXT: addi sp, sp, -16 ; ELEN32-NEXT: .cfi_def_cfa_offset 16 -; ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; ELEN32-NEXT: addi a0, sp, 8 +; ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; ELEN32-NEXT: vse32.v v8, (a0) ; ELEN32-NEXT: fld fa0, 8(sp) ; ELEN32-NEXT: addi sp, sp, 16 @@ -534,8 +534,8 @@ ; RV64ELEN32-NEXT: addi sp, sp, -16 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 16 ; RV64ELEN32-NEXT: sd a0, 8(sp) -; RV64ELEN32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV64ELEN32-NEXT: addi a0, sp, 8 +; RV64ELEN32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV64ELEN32-NEXT: vle16.v v8, (a0) ; RV64ELEN32-NEXT: addi sp, sp, 16 ; RV64ELEN32-NEXT: ret @@ -573,8 +573,8 @@ ; RV64ELEN32-NEXT: addi sp, sp, -16 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 16 ; RV64ELEN32-NEXT: sd a0, 8(sp) -; RV64ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; RV64ELEN32-NEXT: addi a0, sp, 8 +; RV64ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; RV64ELEN32-NEXT: vle32.v v8, (a0) ; RV64ELEN32-NEXT: addi sp, sp, 16 ; RV64ELEN32-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll @@ -283,58 +283,58 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %w) { ; LMULMAX8-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi sp, sp, -384 -; LMULMAX8-NEXT: .cfi_def_cfa_offset 384 -; LMULMAX8-NEXT: sd ra, 376(sp) # 8-byte Folded Spill -; LMULMAX8-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; LMULMAX8-NEXT: addi sp, sp, -256 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 256 +; LMULMAX8-NEXT: sd ra, 248(sp) # 8-byte Folded Spill +; LMULMAX8-NEXT: sd s0, 240(sp) # 8-byte Folded Spill ; LMULMAX8-NEXT: .cfi_offset ra, -8 ; LMULMAX8-NEXT: .cfi_offset s0, -16 -; LMULMAX8-NEXT: addi s0, sp, 384 +; LMULMAX8-NEXT: addi s0, sp, 256 ; LMULMAX8-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX8-NEXT: andi sp, sp, -128 ; LMULMAX8-NEXT: li a2, 32 ; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; LMULMAX8-NEXT: vle32.v v24, (a0) -; LMULMAX8-NEXT: addi a0, sp, 128 +; LMULMAX8-NEXT: mv a3, sp +; LMULMAX8-NEXT: mv a0, sp ; LMULMAX8-NEXT: li a2, 42 -; LMULMAX8-NEXT: addi a3, sp, 128 ; 
LMULMAX8-NEXT: vse32.v v8, (a3) ; LMULMAX8-NEXT: vmv.v.v v8, v24 ; LMULMAX8-NEXT: call ext3@plt -; LMULMAX8-NEXT: addi sp, s0, -384 -; LMULMAX8-NEXT: ld ra, 376(sp) # 8-byte Folded Reload -; LMULMAX8-NEXT: ld s0, 368(sp) # 8-byte Folded Reload -; LMULMAX8-NEXT: addi sp, sp, 384 +; LMULMAX8-NEXT: addi sp, s0, -256 +; LMULMAX8-NEXT: ld ra, 248(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: ld s0, 240(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: addi sp, sp, 256 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: addi sp, sp, -384 -; LMULMAX4-NEXT: .cfi_def_cfa_offset 384 -; LMULMAX4-NEXT: sd ra, 376(sp) # 8-byte Folded Spill -; LMULMAX4-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; LMULMAX4-NEXT: addi sp, sp, -256 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 256 +; LMULMAX4-NEXT: sd ra, 248(sp) # 8-byte Folded Spill +; LMULMAX4-NEXT: sd s0, 240(sp) # 8-byte Folded Spill ; LMULMAX4-NEXT: .cfi_offset ra, -8 ; LMULMAX4-NEXT: .cfi_offset s0, -16 -; LMULMAX4-NEXT: addi s0, sp, 384 +; LMULMAX4-NEXT: addi s0, sp, 256 ; LMULMAX4-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX4-NEXT: andi sp, sp, -128 ; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; LMULMAX4-NEXT: vle32.v v24, (a0) ; LMULMAX4-NEXT: addi a0, a0, 64 ; LMULMAX4-NEXT: vle32.v v28, (a0) -; LMULMAX4-NEXT: addi a0, sp, 192 +; LMULMAX4-NEXT: addi a0, sp, 64 ; LMULMAX4-NEXT: vse32.v v12, (a0) -; LMULMAX4-NEXT: addi a0, sp, 128 +; LMULMAX4-NEXT: mv a1, sp +; LMULMAX4-NEXT: mv a0, sp ; LMULMAX4-NEXT: li a3, 42 -; LMULMAX4-NEXT: addi a1, sp, 128 ; LMULMAX4-NEXT: vse32.v v8, (a1) ; LMULMAX4-NEXT: vmv.v.v v8, v24 ; LMULMAX4-NEXT: vmv.v.v v12, v28 ; LMULMAX4-NEXT: call ext3@plt -; LMULMAX4-NEXT: addi sp, s0, -384 -; LMULMAX4-NEXT: ld ra, 376(sp) # 8-byte Folded Reload -; LMULMAX4-NEXT: ld s0, 368(sp) # 8-byte Folded Reload -; LMULMAX4-NEXT: addi sp, sp, 384 +; LMULMAX4-NEXT: addi sp, s0, -256 +; LMULMAX4-NEXT: ld ra, 248(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: ld s0, 240(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: addi sp, sp, 256 ; LMULMAX4-NEXT: ret %t = call fastcc <32 x i32> @ext3(<32 x i32> %z, <32 x i32> %y, <32 x i32> %x, i32 %w, i32 42) ret <32 x i32> %t @@ -369,18 +369,19 @@ define fastcc <32 x i32> @pass_vector_arg_indirect_stack(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z) { ; LMULMAX8-LABEL: pass_vector_arg_indirect_stack: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi sp, sp, -384 -; LMULMAX8-NEXT: .cfi_def_cfa_offset 384 -; LMULMAX8-NEXT: sd ra, 376(sp) # 8-byte Folded Spill -; LMULMAX8-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; LMULMAX8-NEXT: addi sp, sp, -256 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 256 +; LMULMAX8-NEXT: sd ra, 248(sp) # 8-byte Folded Spill +; LMULMAX8-NEXT: sd s0, 240(sp) # 8-byte Folded Spill ; LMULMAX8-NEXT: .cfi_offset ra, -8 ; LMULMAX8-NEXT: .cfi_offset s0, -16 -; LMULMAX8-NEXT: addi s0, sp, 384 +; LMULMAX8-NEXT: addi s0, sp, 256 ; LMULMAX8-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX8-NEXT: andi sp, sp, -128 ; LMULMAX8-NEXT: li a0, 32 ; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; LMULMAX8-NEXT: vmv.v.i v8, 0 +; LMULMAX8-NEXT: mv a0, sp ; LMULMAX8-NEXT: li a1, 1 ; LMULMAX8-NEXT: li a2, 2 ; LMULMAX8-NEXT: li a3, 3 @@ -388,34 +389,34 @@ ; LMULMAX8-NEXT: li a5, 5 ; LMULMAX8-NEXT: li a6, 6 ; LMULMAX8-NEXT: li a7, 7 -; LMULMAX8-NEXT: addi t2, sp, 128 +; LMULMAX8-NEXT: mv t2, sp ; LMULMAX8-NEXT: li t3, 8 -; LMULMAX8-NEXT: addi a0, sp, 128 ; LMULMAX8-NEXT: vse32.v v8, (a0) ; LMULMAX8-NEXT: li a0, 0 ; LMULMAX8-NEXT: vmv.v.i v16, 0 ; LMULMAX8-NEXT: call 
vector_arg_indirect_stack@plt -; LMULMAX8-NEXT: addi sp, s0, -384 -; LMULMAX8-NEXT: ld ra, 376(sp) # 8-byte Folded Reload -; LMULMAX8-NEXT: ld s0, 368(sp) # 8-byte Folded Reload -; LMULMAX8-NEXT: addi sp, sp, 384 +; LMULMAX8-NEXT: addi sp, s0, -256 +; LMULMAX8-NEXT: ld ra, 248(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: ld s0, 240(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: addi sp, sp, 256 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: pass_vector_arg_indirect_stack: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: addi sp, sp, -384 -; LMULMAX4-NEXT: .cfi_def_cfa_offset 384 -; LMULMAX4-NEXT: sd ra, 376(sp) # 8-byte Folded Spill -; LMULMAX4-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; LMULMAX4-NEXT: addi sp, sp, -256 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 256 +; LMULMAX4-NEXT: sd ra, 248(sp) # 8-byte Folded Spill +; LMULMAX4-NEXT: sd s0, 240(sp) # 8-byte Folded Spill ; LMULMAX4-NEXT: .cfi_offset ra, -8 ; LMULMAX4-NEXT: .cfi_offset s0, -16 -; LMULMAX4-NEXT: addi s0, sp, 384 +; LMULMAX4-NEXT: addi s0, sp, 256 ; LMULMAX4-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX4-NEXT: andi sp, sp, -128 -; LMULMAX4-NEXT: addi a0, sp, 192 +; LMULMAX4-NEXT: addi a0, sp, 64 ; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; LMULMAX4-NEXT: vmv.v.i v8, 0 ; LMULMAX4-NEXT: vse32.v v8, (a0) +; LMULMAX4-NEXT: mv a0, sp ; LMULMAX4-NEXT: li a1, 1 ; LMULMAX4-NEXT: li a2, 2 ; LMULMAX4-NEXT: li a3, 3 @@ -423,19 +424,18 @@ ; LMULMAX4-NEXT: li a5, 5 ; LMULMAX4-NEXT: li a6, 6 ; LMULMAX4-NEXT: li a7, 7 -; LMULMAX4-NEXT: addi t2, sp, 128 +; LMULMAX4-NEXT: mv t2, sp ; LMULMAX4-NEXT: li t4, 8 -; LMULMAX4-NEXT: addi a0, sp, 128 ; LMULMAX4-NEXT: vse32.v v8, (a0) ; LMULMAX4-NEXT: li a0, 0 ; LMULMAX4-NEXT: vmv.v.i v12, 0 ; LMULMAX4-NEXT: vmv.v.i v16, 0 ; LMULMAX4-NEXT: vmv.v.i v20, 0 ; LMULMAX4-NEXT: call vector_arg_indirect_stack@plt -; LMULMAX4-NEXT: addi sp, s0, -384 -; LMULMAX4-NEXT: ld ra, 376(sp) # 8-byte Folded Reload -; LMULMAX4-NEXT: ld s0, 368(sp) # 8-byte Folded Reload -; LMULMAX4-NEXT: addi sp, sp, 384 +; LMULMAX4-NEXT: addi sp, s0, -256 +; LMULMAX4-NEXT: ld ra, 248(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: ld s0, 240(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: addi sp, sp, 256 ; LMULMAX4-NEXT: ret %s = call fastcc <32 x i32> @vector_arg_indirect_stack(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, i32 8) ret <32 x i32> %s @@ -445,31 +445,25 @@ define fastcc <32 x i32> @vector_arg_direct_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12, i32 %13, <32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %last) { ; LMULMAX8-LABEL: vector_arg_direct_stack: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi sp, sp, -16 -; LMULMAX8-NEXT: .cfi_def_cfa_offset 16 ; LMULMAX8-NEXT: li a0, 32 +; LMULMAX8-NEXT: addi a1, sp, 8 ; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; LMULMAX8-NEXT: addi a0, sp, 24 -; LMULMAX8-NEXT: vle32.v v24, (a0) +; LMULMAX8-NEXT: vle32.v v24, (a1) ; LMULMAX8-NEXT: vadd.vv v8, v8, v16 ; LMULMAX8-NEXT: vadd.vv v8, v8, v24 -; LMULMAX8-NEXT: addi sp, sp, 16 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: vector_arg_direct_stack: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: addi sp, sp, -16 -; LMULMAX4-NEXT: .cfi_def_cfa_offset 16 ; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu -; LMULMAX4-NEXT: addi a0, sp, 24 +; LMULMAX4-NEXT: addi a0, sp, 8 ; LMULMAX4-NEXT: vle32.v v24, (a0) -; LMULMAX4-NEXT: addi a0, sp, 88 +; LMULMAX4-NEXT: addi a0, sp, 72 ; LMULMAX4-NEXT: vle32.v v28, (a0) ; 
LMULMAX4-NEXT: vadd.vv v12, v12, v20 ; LMULMAX4-NEXT: vadd.vv v8, v8, v16 ; LMULMAX4-NEXT: vadd.vv v8, v8, v24 ; LMULMAX4-NEXT: vadd.vv v12, v12, v28 -; LMULMAX4-NEXT: addi sp, sp, 16 ; LMULMAX4-NEXT: ret %s = add <32 x i32> %x, %y %t = add <32 x i32> %s, %z @@ -557,13 +551,10 @@ define fastcc <4 x i1> @vector_mask_arg_direct_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12, i32 %13, <32 x i32> %x, <32 x i32> %y, <32 x i32> %z, <4 x i1> %m1, <4 x i1> %m2, i32 %last) { ; CHECK-LABEL: vector_mask_arg_direct_stack: ; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: addi a0, sp, 136 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu -; CHECK-NEXT: addi a0, sp, 152 ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vmxor.mm v0, v0, v8 -; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %r = xor <4 x i1> %m1, %m2 ret <4 x i1> %r diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll @@ -782,69 +782,69 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %w) { ; LMULMAX8-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi sp, sp, -384 -; LMULMAX8-NEXT: .cfi_def_cfa_offset 384 -; LMULMAX8-NEXT: sd ra, 376(sp) # 8-byte Folded Spill -; LMULMAX8-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; LMULMAX8-NEXT: addi sp, sp, -256 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 256 +; LMULMAX8-NEXT: sd ra, 248(sp) # 8-byte Folded Spill +; LMULMAX8-NEXT: sd s0, 240(sp) # 8-byte Folded Spill ; LMULMAX8-NEXT: .cfi_offset ra, -8 ; LMULMAX8-NEXT: .cfi_offset s0, -16 -; LMULMAX8-NEXT: addi s0, sp, 384 +; LMULMAX8-NEXT: addi s0, sp, 256 ; LMULMAX8-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX8-NEXT: andi sp, sp, -128 ; LMULMAX8-NEXT: li a2, 32 ; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; LMULMAX8-NEXT: vle32.v v24, (a0) -; LMULMAX8-NEXT: addi a0, sp, 128 +; LMULMAX8-NEXT: mv a3, sp +; LMULMAX8-NEXT: mv a0, sp ; LMULMAX8-NEXT: li a2, 42 -; LMULMAX8-NEXT: addi a3, sp, 128 ; LMULMAX8-NEXT: vse32.v v8, (a3) ; LMULMAX8-NEXT: vmv.v.v v8, v24 ; LMULMAX8-NEXT: call ext3@plt -; LMULMAX8-NEXT: addi sp, s0, -384 -; LMULMAX8-NEXT: ld ra, 376(sp) # 8-byte Folded Reload -; LMULMAX8-NEXT: ld s0, 368(sp) # 8-byte Folded Reload -; LMULMAX8-NEXT: addi sp, sp, 384 +; LMULMAX8-NEXT: addi sp, s0, -256 +; LMULMAX8-NEXT: ld ra, 248(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: ld s0, 240(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: addi sp, sp, 256 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: addi sp, sp, -384 -; LMULMAX4-NEXT: .cfi_def_cfa_offset 384 -; LMULMAX4-NEXT: sd ra, 376(sp) # 8-byte Folded Spill -; LMULMAX4-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; LMULMAX4-NEXT: addi sp, sp, -256 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 256 +; LMULMAX4-NEXT: sd ra, 248(sp) # 8-byte Folded Spill +; LMULMAX4-NEXT: sd s0, 240(sp) # 8-byte Folded Spill ; LMULMAX4-NEXT: .cfi_offset ra, -8 ; LMULMAX4-NEXT: .cfi_offset s0, -16 -; LMULMAX4-NEXT: addi s0, sp, 384 +; LMULMAX4-NEXT: addi s0, sp, 256 ; LMULMAX4-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX4-NEXT: andi sp, sp, -128 ; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; LMULMAX4-NEXT: vle32.v v24, (a0) ; LMULMAX4-NEXT: addi a0, a0, 64 ; 
LMULMAX4-NEXT: vle32.v v28, (a0) -; LMULMAX4-NEXT: addi a0, sp, 192 +; LMULMAX4-NEXT: addi a0, sp, 64 ; LMULMAX4-NEXT: vse32.v v12, (a0) -; LMULMAX4-NEXT: addi a0, sp, 128 +; LMULMAX4-NEXT: mv a1, sp +; LMULMAX4-NEXT: mv a0, sp ; LMULMAX4-NEXT: li a3, 42 -; LMULMAX4-NEXT: addi a1, sp, 128 ; LMULMAX4-NEXT: vse32.v v8, (a1) ; LMULMAX4-NEXT: vmv.v.v v8, v24 ; LMULMAX4-NEXT: vmv.v.v v12, v28 ; LMULMAX4-NEXT: call ext3@plt -; LMULMAX4-NEXT: addi sp, s0, -384 -; LMULMAX4-NEXT: ld ra, 376(sp) # 8-byte Folded Reload -; LMULMAX4-NEXT: ld s0, 368(sp) # 8-byte Folded Reload -; LMULMAX4-NEXT: addi sp, sp, 384 +; LMULMAX4-NEXT: addi sp, s0, -256 +; LMULMAX4-NEXT: ld ra, 248(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: ld s0, 240(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: addi sp, sp, 256 ; LMULMAX4-NEXT: ret ; ; LMULMAX2-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi sp, sp, -384 -; LMULMAX2-NEXT: .cfi_def_cfa_offset 384 -; LMULMAX2-NEXT: sd ra, 376(sp) # 8-byte Folded Spill -; LMULMAX2-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; LMULMAX2-NEXT: addi sp, sp, -256 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 256 +; LMULMAX2-NEXT: sd ra, 248(sp) # 8-byte Folded Spill +; LMULMAX2-NEXT: sd s0, 240(sp) # 8-byte Folded Spill ; LMULMAX2-NEXT: .cfi_offset ra, -8 ; LMULMAX2-NEXT: .cfi_offset s0, -16 -; LMULMAX2-NEXT: addi s0, sp, 384 +; LMULMAX2-NEXT: addi s0, sp, 256 ; LMULMAX2-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX2-NEXT: andi sp, sp, -128 ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu @@ -855,38 +855,38 @@ ; LMULMAX2-NEXT: vle32.v v28, (a1) ; LMULMAX2-NEXT: addi a0, a0, 96 ; LMULMAX2-NEXT: vle32.v v30, (a0) -; LMULMAX2-NEXT: addi a0, sp, 224 +; LMULMAX2-NEXT: addi a0, sp, 96 ; LMULMAX2-NEXT: vse32.v v14, (a0) -; LMULMAX2-NEXT: addi a0, sp, 192 +; LMULMAX2-NEXT: addi a0, sp, 64 ; LMULMAX2-NEXT: vse32.v v12, (a0) -; LMULMAX2-NEXT: addi a0, sp, 160 +; LMULMAX2-NEXT: addi a0, sp, 32 ; LMULMAX2-NEXT: vse32.v v10, (a0) -; LMULMAX2-NEXT: addi a0, sp, 128 +; LMULMAX2-NEXT: mv a1, sp +; LMULMAX2-NEXT: mv a0, sp ; LMULMAX2-NEXT: li a5, 42 -; LMULMAX2-NEXT: addi a1, sp, 128 ; LMULMAX2-NEXT: vse32.v v8, (a1) ; LMULMAX2-NEXT: vmv.v.v v8, v24 ; LMULMAX2-NEXT: vmv.v.v v10, v26 ; LMULMAX2-NEXT: vmv.v.v v12, v28 ; LMULMAX2-NEXT: vmv.v.v v14, v30 ; LMULMAX2-NEXT: call ext3@plt -; LMULMAX2-NEXT: addi sp, s0, -384 -; LMULMAX2-NEXT: ld ra, 376(sp) # 8-byte Folded Reload -; LMULMAX2-NEXT: ld s0, 368(sp) # 8-byte Folded Reload -; LMULMAX2-NEXT: addi sp, sp, 384 +; LMULMAX2-NEXT: addi sp, s0, -256 +; LMULMAX2-NEXT: ld ra, 248(sp) # 8-byte Folded Reload +; LMULMAX2-NEXT: ld s0, 240(sp) # 8-byte Folded Reload +; LMULMAX2-NEXT: addi sp, sp, 256 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: addi sp, sp, -384 -; LMULMAX1-NEXT: .cfi_def_cfa_offset 384 -; LMULMAX1-NEXT: sd ra, 376(sp) # 8-byte Folded Spill -; LMULMAX1-NEXT: sd s0, 368(sp) # 8-byte Folded Spill -; LMULMAX1-NEXT: sd s1, 360(sp) # 8-byte Folded Spill +; LMULMAX1-NEXT: addi sp, sp, -256 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 256 +; LMULMAX1-NEXT: sd ra, 248(sp) # 8-byte Folded Spill +; LMULMAX1-NEXT: sd s0, 240(sp) # 8-byte Folded Spill +; LMULMAX1-NEXT: sd s1, 232(sp) # 8-byte Folded Spill ; LMULMAX1-NEXT: .cfi_offset ra, -8 ; LMULMAX1-NEXT: .cfi_offset s0, -16 ; LMULMAX1-NEXT: .cfi_offset s1, -24 -; LMULMAX1-NEXT: addi s0, sp, 384 +; LMULMAX1-NEXT: addi s0, sp, 256 ; LMULMAX1-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX1-NEXT: andi sp, sp, -128 ; LMULMAX1-NEXT: mv s1, 
sp @@ -906,28 +906,28 @@ ; LMULMAX1-NEXT: vle32.v v30, (a1) ; LMULMAX1-NEXT: addi a0, a0, 112 ; LMULMAX1-NEXT: vle32.v v31, (a0) -; LMULMAX1-NEXT: ld a0, 0(s0) +; LMULMAX1-NEXT: ld a1, 0(s0) ; LMULMAX1-NEXT: addi sp, sp, -16 -; LMULMAX1-NEXT: addi a1, s1, 240 -; LMULMAX1-NEXT: vse32.v v15, (a1) -; LMULMAX1-NEXT: addi a1, s1, 224 -; LMULMAX1-NEXT: vse32.v v14, (a1) -; LMULMAX1-NEXT: addi a1, s1, 208 -; LMULMAX1-NEXT: vse32.v v13, (a1) -; LMULMAX1-NEXT: addi a1, s1, 192 -; LMULMAX1-NEXT: vse32.v v12, (a1) -; LMULMAX1-NEXT: addi a1, s1, 176 -; LMULMAX1-NEXT: vse32.v v11, (a1) -; LMULMAX1-NEXT: addi a1, s1, 160 -; LMULMAX1-NEXT: vse32.v v10, (a1) -; LMULMAX1-NEXT: addi a1, s1, 144 -; LMULMAX1-NEXT: vse32.v v9, (a1) -; LMULMAX1-NEXT: li a1, 42 -; LMULMAX1-NEXT: sd a1, 8(sp) -; LMULMAX1-NEXT: sd a0, 0(sp) -; LMULMAX1-NEXT: addi a0, s1, 128 -; LMULMAX1-NEXT: addi a1, s1, 128 -; LMULMAX1-NEXT: vse32.v v8, (a1) +; LMULMAX1-NEXT: addi a0, s1, 112 +; LMULMAX1-NEXT: vse32.v v15, (a0) +; LMULMAX1-NEXT: addi a0, s1, 96 +; LMULMAX1-NEXT: vse32.v v14, (a0) +; LMULMAX1-NEXT: addi a0, s1, 80 +; LMULMAX1-NEXT: vse32.v v13, (a0) +; LMULMAX1-NEXT: addi a0, s1, 64 +; LMULMAX1-NEXT: vse32.v v12, (a0) +; LMULMAX1-NEXT: addi a0, s1, 48 +; LMULMAX1-NEXT: vse32.v v11, (a0) +; LMULMAX1-NEXT: addi a0, s1, 32 +; LMULMAX1-NEXT: vse32.v v10, (a0) +; LMULMAX1-NEXT: addi a0, s1, 16 +; LMULMAX1-NEXT: vse32.v v9, (a0) +; LMULMAX1-NEXT: mv a0, s1 +; LMULMAX1-NEXT: vse32.v v8, (a0) +; LMULMAX1-NEXT: li a0, 42 +; LMULMAX1-NEXT: sd a0, 8(sp) +; LMULMAX1-NEXT: mv a0, s1 +; LMULMAX1-NEXT: sd a1, 0(sp) ; LMULMAX1-NEXT: vmv.v.v v8, v24 ; LMULMAX1-NEXT: vmv.v.v v9, v25 ; LMULMAX1-NEXT: vmv.v.v v10, v26 @@ -938,11 +938,11 @@ ; LMULMAX1-NEXT: vmv.v.v v15, v31 ; LMULMAX1-NEXT: call ext3@plt ; LMULMAX1-NEXT: addi sp, sp, 16 -; LMULMAX1-NEXT: addi sp, s0, -384 -; LMULMAX1-NEXT: ld ra, 376(sp) # 8-byte Folded Reload -; LMULMAX1-NEXT: ld s0, 368(sp) # 8-byte Folded Reload -; LMULMAX1-NEXT: ld s1, 360(sp) # 8-byte Folded Reload -; LMULMAX1-NEXT: addi sp, sp, 384 +; LMULMAX1-NEXT: addi sp, s0, -256 +; LMULMAX1-NEXT: ld ra, 248(sp) # 8-byte Folded Reload +; LMULMAX1-NEXT: ld s0, 240(sp) # 8-byte Folded Reload +; LMULMAX1-NEXT: ld s1, 232(sp) # 8-byte Folded Reload +; LMULMAX1-NEXT: addi sp, sp, 256 ; LMULMAX1-NEXT: ret %t = call <32 x i32> @ext3(<32 x i32> %z, <32 x i32> %y, <32 x i32> %x, i32 %w, i32 42) ret <32 x i32> %t @@ -1022,13 +1022,13 @@ define <32 x i32> @call_split_vector_args(<2 x i32>* %pa, <32 x i32>* %pb) { ; LMULMAX8-LABEL: call_split_vector_args: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi sp, sp, -384 -; LMULMAX8-NEXT: .cfi_def_cfa_offset 384 -; LMULMAX8-NEXT: sd ra, 376(sp) # 8-byte Folded Spill -; LMULMAX8-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; LMULMAX8-NEXT: addi sp, sp, -256 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 256 +; LMULMAX8-NEXT: sd ra, 248(sp) # 8-byte Folded Spill +; LMULMAX8-NEXT: sd s0, 240(sp) # 8-byte Folded Spill ; LMULMAX8-NEXT: .cfi_offset ra, -8 ; LMULMAX8-NEXT: .cfi_offset s0, -16 -; LMULMAX8-NEXT: addi s0, sp, 384 +; LMULMAX8-NEXT: addi s0, sp, 256 ; LMULMAX8-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX8-NEXT: andi sp, sp, -128 ; LMULMAX8-NEXT: vsetivli zero, 2, e32, mf2, ta, mu @@ -1036,29 +1036,29 @@ ; LMULMAX8-NEXT: li a0, 32 ; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; LMULMAX8-NEXT: vle32.v v16, (a1) -; LMULMAX8-NEXT: addi a0, sp, 128 -; LMULMAX8-NEXT: addi a1, sp, 128 +; LMULMAX8-NEXT: mv a1, sp +; LMULMAX8-NEXT: mv a0, sp ; LMULMAX8-NEXT: vse32.v v16, (a1) ; LMULMAX8-NEXT: vmv1r.v v9, v8 ; 
LMULMAX8-NEXT: vmv1r.v v10, v8 ; LMULMAX8-NEXT: vmv1r.v v11, v8 ; LMULMAX8-NEXT: vmv1r.v v12, v8 ; LMULMAX8-NEXT: call split_vector_args@plt -; LMULMAX8-NEXT: addi sp, s0, -384 -; LMULMAX8-NEXT: ld ra, 376(sp) # 8-byte Folded Reload -; LMULMAX8-NEXT: ld s0, 368(sp) # 8-byte Folded Reload -; LMULMAX8-NEXT: addi sp, sp, 384 +; LMULMAX8-NEXT: addi sp, s0, -256 +; LMULMAX8-NEXT: ld ra, 248(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: ld s0, 240(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: addi sp, sp, 256 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: call_split_vector_args: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: addi sp, sp, -384 -; LMULMAX4-NEXT: .cfi_def_cfa_offset 384 -; LMULMAX4-NEXT: sd ra, 376(sp) # 8-byte Folded Spill -; LMULMAX4-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; LMULMAX4-NEXT: addi sp, sp, -256 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 256 +; LMULMAX4-NEXT: sd ra, 248(sp) # 8-byte Folded Spill +; LMULMAX4-NEXT: sd s0, 240(sp) # 8-byte Folded Spill ; LMULMAX4-NEXT: .cfi_offset ra, -8 ; LMULMAX4-NEXT: .cfi_offset s0, -16 -; LMULMAX4-NEXT: addi s0, sp, 384 +; LMULMAX4-NEXT: addi s0, sp, 256 ; LMULMAX4-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX4-NEXT: andi sp, sp, -128 ; LMULMAX4-NEXT: vsetivli zero, 2, e32, mf2, ta, mu @@ -1067,31 +1067,31 @@ ; LMULMAX4-NEXT: vle32.v v16, (a1) ; LMULMAX4-NEXT: addi a0, a1, 64 ; LMULMAX4-NEXT: vle32.v v20, (a0) -; LMULMAX4-NEXT: addi a0, sp, 192 +; LMULMAX4-NEXT: addi a0, sp, 64 ; LMULMAX4-NEXT: vse32.v v20, (a0) -; LMULMAX4-NEXT: addi a0, sp, 128 -; LMULMAX4-NEXT: addi a1, sp, 128 +; LMULMAX4-NEXT: mv a1, sp +; LMULMAX4-NEXT: mv a0, sp ; LMULMAX4-NEXT: vse32.v v16, (a1) ; LMULMAX4-NEXT: vmv1r.v v9, v8 ; LMULMAX4-NEXT: vmv1r.v v10, v8 ; LMULMAX4-NEXT: vmv1r.v v11, v8 ; LMULMAX4-NEXT: vmv1r.v v12, v8 ; LMULMAX4-NEXT: call split_vector_args@plt -; LMULMAX4-NEXT: addi sp, s0, -384 -; LMULMAX4-NEXT: ld ra, 376(sp) # 8-byte Folded Reload -; LMULMAX4-NEXT: ld s0, 368(sp) # 8-byte Folded Reload -; LMULMAX4-NEXT: addi sp, sp, 384 +; LMULMAX4-NEXT: addi sp, s0, -256 +; LMULMAX4-NEXT: ld ra, 248(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: ld s0, 240(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: addi sp, sp, 256 ; LMULMAX4-NEXT: ret ; ; LMULMAX2-LABEL: call_split_vector_args: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi sp, sp, -256 -; LMULMAX2-NEXT: .cfi_def_cfa_offset 256 -; LMULMAX2-NEXT: sd ra, 248(sp) # 8-byte Folded Spill -; LMULMAX2-NEXT: sd s0, 240(sp) # 8-byte Folded Spill +; LMULMAX2-NEXT: addi sp, sp, -128 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 128 +; LMULMAX2-NEXT: sd ra, 120(sp) # 8-byte Folded Spill +; LMULMAX2-NEXT: sd s0, 112(sp) # 8-byte Folded Spill ; LMULMAX2-NEXT: .cfi_offset ra, -8 ; LMULMAX2-NEXT: .cfi_offset s0, -16 -; LMULMAX2-NEXT: addi s0, sp, 256 +; LMULMAX2-NEXT: addi s0, sp, 128 ; LMULMAX2-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX2-NEXT: andi sp, sp, -128 ; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, mu @@ -1104,12 +1104,12 @@ ; LMULMAX2-NEXT: vle32.v v18, (a0) ; LMULMAX2-NEXT: addi a0, a1, 96 ; LMULMAX2-NEXT: vle32.v v20, (a0) -; LMULMAX2-NEXT: addi a0, sp, 192 +; LMULMAX2-NEXT: addi a0, sp, 64 ; LMULMAX2-NEXT: vse32.v v20, (a0) -; LMULMAX2-NEXT: addi a0, sp, 160 +; LMULMAX2-NEXT: addi a0, sp, 32 ; LMULMAX2-NEXT: vse32.v v18, (a0) -; LMULMAX2-NEXT: addi a0, sp, 128 -; LMULMAX2-NEXT: addi a1, sp, 128 +; LMULMAX2-NEXT: mv a1, sp +; LMULMAX2-NEXT: mv a0, sp ; LMULMAX2-NEXT: vse32.v v16, (a1) ; LMULMAX2-NEXT: vmv1r.v v9, v8 ; LMULMAX2-NEXT: vmv1r.v v10, v8 @@ -1117,21 +1117,21 @@ ; LMULMAX2-NEXT: vmv1r.v v12, v8 ; LMULMAX2-NEXT: vmv.v.v v22, v14 ; 
LMULMAX2-NEXT: call split_vector_args@plt -; LMULMAX2-NEXT: addi sp, s0, -256 -; LMULMAX2-NEXT: ld ra, 248(sp) # 8-byte Folded Reload -; LMULMAX2-NEXT: ld s0, 240(sp) # 8-byte Folded Reload -; LMULMAX2-NEXT: addi sp, sp, 256 +; LMULMAX2-NEXT: addi sp, s0, -128 +; LMULMAX2-NEXT: ld ra, 120(sp) # 8-byte Folded Reload +; LMULMAX2-NEXT: ld s0, 112(sp) # 8-byte Folded Reload +; LMULMAX2-NEXT: addi sp, sp, 128 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: call_split_vector_args: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: addi sp, sp, -256 -; LMULMAX1-NEXT: .cfi_def_cfa_offset 256 -; LMULMAX1-NEXT: sd ra, 248(sp) # 8-byte Folded Spill -; LMULMAX1-NEXT: sd s0, 240(sp) # 8-byte Folded Spill +; LMULMAX1-NEXT: addi sp, sp, -128 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 128 +; LMULMAX1-NEXT: sd ra, 120(sp) # 8-byte Folded Spill +; LMULMAX1-NEXT: sd s0, 112(sp) # 8-byte Folded Spill ; LMULMAX1-NEXT: .cfi_offset ra, -8 ; LMULMAX1-NEXT: .cfi_offset s0, -16 -; LMULMAX1-NEXT: addi s0, sp, 256 +; LMULMAX1-NEXT: addi s0, sp, 128 ; LMULMAX1-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX1-NEXT: andi sp, sp, -128 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu @@ -1152,16 +1152,16 @@ ; LMULMAX1-NEXT: vle32.v v19, (a0) ; LMULMAX1-NEXT: addi a0, a1, 112 ; LMULMAX1-NEXT: vle32.v v20, (a0) -; LMULMAX1-NEXT: addi a0, sp, 192 +; LMULMAX1-NEXT: addi a0, sp, 64 ; LMULMAX1-NEXT: vse32.v v20, (a0) -; LMULMAX1-NEXT: addi a0, sp, 176 +; LMULMAX1-NEXT: addi a0, sp, 48 ; LMULMAX1-NEXT: vse32.v v19, (a0) -; LMULMAX1-NEXT: addi a0, sp, 160 +; LMULMAX1-NEXT: addi a0, sp, 32 ; LMULMAX1-NEXT: vse32.v v18, (a0) -; LMULMAX1-NEXT: addi a0, sp, 144 +; LMULMAX1-NEXT: addi a0, sp, 16 ; LMULMAX1-NEXT: vse32.v v17, (a0) -; LMULMAX1-NEXT: addi a0, sp, 128 -; LMULMAX1-NEXT: addi a1, sp, 128 +; LMULMAX1-NEXT: mv a1, sp +; LMULMAX1-NEXT: mv a0, sp ; LMULMAX1-NEXT: vse32.v v16, (a1) ; LMULMAX1-NEXT: vmv1r.v v9, v8 ; LMULMAX1-NEXT: vmv1r.v v10, v8 @@ -1171,10 +1171,10 @@ ; LMULMAX1-NEXT: vmv.v.v v22, v14 ; LMULMAX1-NEXT: vmv.v.v v23, v15 ; LMULMAX1-NEXT: call split_vector_args@plt -; LMULMAX1-NEXT: addi sp, s0, -256 -; LMULMAX1-NEXT: ld ra, 248(sp) # 8-byte Folded Reload -; LMULMAX1-NEXT: ld s0, 240(sp) # 8-byte Folded Reload -; LMULMAX1-NEXT: addi sp, sp, 256 +; LMULMAX1-NEXT: addi sp, s0, -128 +; LMULMAX1-NEXT: ld ra, 120(sp) # 8-byte Folded Reload +; LMULMAX1-NEXT: ld s0, 112(sp) # 8-byte Folded Reload +; LMULMAX1-NEXT: addi sp, sp, 128 ; LMULMAX1-NEXT: ret %a = load <2 x i32>, <2 x i32>* %pa %b = load <32 x i32>, <32 x i32>* %pb @@ -1187,80 +1187,68 @@ define <32 x i32> @vector_arg_via_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %8) { ; LMULMAX8-LABEL: vector_arg_via_stack: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: addi sp, sp, -16 -; LMULMAX8-NEXT: .cfi_def_cfa_offset 16 ; LMULMAX8-NEXT: li a0, 32 +; LMULMAX8-NEXT: mv a1, sp ; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; LMULMAX8-NEXT: addi a0, sp, 16 -; LMULMAX8-NEXT: vle32.v v16, (a0) +; LMULMAX8-NEXT: vle32.v v16, (a1) ; LMULMAX8-NEXT: vadd.vv v8, v8, v16 -; LMULMAX8-NEXT: addi sp, sp, 16 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: vector_arg_via_stack: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: addi sp, sp, -16 -; LMULMAX4-NEXT: .cfi_def_cfa_offset 16 ; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu -; LMULMAX4-NEXT: addi a0, sp, 16 +; LMULMAX4-NEXT: mv a0, sp ; LMULMAX4-NEXT: vle32.v v16, (a0) -; LMULMAX4-NEXT: addi a0, sp, 80 +; LMULMAX4-NEXT: addi a0, sp, 64 ; LMULMAX4-NEXT: vle32.v v20, (a0) ; LMULMAX4-NEXT: vadd.vv v8, v8, v16 
; LMULMAX4-NEXT: vadd.vv v12, v12, v20 -; LMULMAX4-NEXT: addi sp, sp, 16 ; LMULMAX4-NEXT: ret ; ; LMULMAX2-LABEL: vector_arg_via_stack: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi sp, sp, -16 -; LMULMAX2-NEXT: .cfi_def_cfa_offset 16 ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; LMULMAX2-NEXT: addi a0, sp, 16 +; LMULMAX2-NEXT: addi a0, sp, 64 ; LMULMAX2-NEXT: vle32.v v16, (a0) -; LMULMAX2-NEXT: addi a0, sp, 48 +; LMULMAX2-NEXT: mv a0, sp ; LMULMAX2-NEXT: vle32.v v18, (a0) -; LMULMAX2-NEXT: addi a0, sp, 80 +; LMULMAX2-NEXT: addi a0, sp, 32 ; LMULMAX2-NEXT: vle32.v v20, (a0) -; LMULMAX2-NEXT: addi a0, sp, 112 +; LMULMAX2-NEXT: addi a0, sp, 96 ; LMULMAX2-NEXT: vle32.v v22, (a0) -; LMULMAX2-NEXT: vadd.vv v8, v8, v16 -; LMULMAX2-NEXT: vadd.vv v10, v10, v18 -; LMULMAX2-NEXT: vadd.vv v12, v12, v20 +; LMULMAX2-NEXT: vadd.vv v8, v8, v18 +; LMULMAX2-NEXT: vadd.vv v10, v10, v20 +; LMULMAX2-NEXT: vadd.vv v12, v12, v16 ; LMULMAX2-NEXT: vadd.vv v14, v14, v22 -; LMULMAX2-NEXT: addi sp, sp, 16 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: vector_arg_via_stack: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: addi sp, sp, -16 -; LMULMAX1-NEXT: .cfi_def_cfa_offset 16 +; LMULMAX1-NEXT: addi a0, sp, 112 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu -; LMULMAX1-NEXT: addi a0, sp, 128 ; LMULMAX1-NEXT: vle32.v v16, (a0) -; LMULMAX1-NEXT: addi a0, sp, 112 -; LMULMAX1-NEXT: vle32.v v17, (a0) ; LMULMAX1-NEXT: addi a0, sp, 96 -; LMULMAX1-NEXT: vle32.v v18, (a0) +; LMULMAX1-NEXT: vle32.v v17, (a0) ; LMULMAX1-NEXT: addi a0, sp, 80 +; LMULMAX1-NEXT: vle32.v v18, (a0) +; LMULMAX1-NEXT: addi a0, sp, 64 ; LMULMAX1-NEXT: vle32.v v19, (a0) -; LMULMAX1-NEXT: addi a0, sp, 16 -; LMULMAX1-NEXT: vle32.v v20, (a0) ; LMULMAX1-NEXT: addi a0, sp, 32 +; LMULMAX1-NEXT: vle32.v v20, (a0) +; LMULMAX1-NEXT: mv a0, sp ; LMULMAX1-NEXT: vle32.v v21, (a0) -; LMULMAX1-NEXT: addi a0, sp, 48 +; LMULMAX1-NEXT: addi a0, sp, 16 ; LMULMAX1-NEXT: vle32.v v22, (a0) -; LMULMAX1-NEXT: addi a0, sp, 64 +; LMULMAX1-NEXT: addi a0, sp, 48 ; LMULMAX1-NEXT: vle32.v v23, (a0) -; LMULMAX1-NEXT: vadd.vv v8, v8, v20 -; LMULMAX1-NEXT: vadd.vv v9, v9, v21 -; LMULMAX1-NEXT: vadd.vv v10, v10, v22 +; LMULMAX1-NEXT: vadd.vv v8, v8, v21 +; LMULMAX1-NEXT: vadd.vv v9, v9, v22 +; LMULMAX1-NEXT: vadd.vv v10, v10, v20 ; LMULMAX1-NEXT: vadd.vv v11, v11, v23 ; LMULMAX1-NEXT: vadd.vv v12, v12, v19 ; LMULMAX1-NEXT: vadd.vv v13, v13, v18 ; LMULMAX1-NEXT: vadd.vv v14, v14, v17 ; LMULMAX1-NEXT: vadd.vv v15, v15, v16 -; LMULMAX1-NEXT: addi sp, sp, 16 ; LMULMAX1-NEXT: ret %s = add <32 x i32> %x, %z ret <32 x i32> %s @@ -1421,12 +1409,9 @@ define <4 x i1> @vector_mask_arg_via_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %8, <4 x i1> %9, <4 x i1> %10) { ; CHECK-LABEL: vector_mask_arg_via_stack: ; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: addi a0, sp, 136 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu -; CHECK-NEXT: addi a0, sp, 152 ; CHECK-NEXT: vlm.v v0, (a0) -; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ret <4 x i1> %10 } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll @@ -321,130 +321,126 @@ define i1 @extractelt_v256i1(<256 x i8>* %x, i64 %idx) nounwind { ; RV32-LABEL: extractelt_v256i1: ; RV32: # %bb.0: -; RV32-NEXT: addi sp, sp, -512 -; RV32-NEXT: 
sw ra, 508(sp) # 4-byte Folded Spill -; RV32-NEXT: sw s0, 504(sp) # 4-byte Folded Spill -; RV32-NEXT: addi s0, sp, 512 +; RV32-NEXT: addi sp, sp, -384 +; RV32-NEXT: sw ra, 380(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 376(sp) # 4-byte Folded Spill +; RV32-NEXT: addi s0, sp, 384 ; RV32-NEXT: andi sp, sp, -128 ; RV32-NEXT: andi a1, a1, 255 +; RV32-NEXT: li a2, 128 +; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; RV32-NEXT: addi a2, a0, 128 -; RV32-NEXT: li a3, 128 -; RV32-NEXT: vsetvli zero, a3, e8, m8, ta, mu -; RV32-NEXT: vle8.v v16, (a0) -; RV32-NEXT: vle8.v v24, (a2) -; RV32-NEXT: addi a0, sp, 128 -; RV32-NEXT: add a0, a0, a1 +; RV32-NEXT: vle8.v v16, (a2) +; RV32-NEXT: vle8.v v24, (a0) +; RV32-NEXT: mv a0, sp +; RV32-NEXT: add a1, a0, a1 ; RV32-NEXT: vmseq.vi v8, v16, 0 ; RV32-NEXT: vmseq.vi v0, v24, 0 ; RV32-NEXT: vmv.v.i v16, 0 ; RV32-NEXT: vmerge.vim v24, v16, 1, v0 -; RV32-NEXT: addi a1, sp, 256 -; RV32-NEXT: vse8.v v24, (a1) +; RV32-NEXT: vse8.v v24, (a0) ; RV32-NEXT: vmv1r.v v0, v8 ; RV32-NEXT: vmerge.vim v8, v16, 1, v0 -; RV32-NEXT: addi a1, sp, 128 -; RV32-NEXT: vse8.v v8, (a1) -; RV32-NEXT: lb a0, 0(a0) -; RV32-NEXT: addi sp, s0, -512 -; RV32-NEXT: lw ra, 508(sp) # 4-byte Folded Reload -; RV32-NEXT: lw s0, 504(sp) # 4-byte Folded Reload -; RV32-NEXT: addi sp, sp, 512 +; RV32-NEXT: addi a0, sp, 128 +; RV32-NEXT: vse8.v v8, (a0) +; RV32-NEXT: lb a0, 0(a1) +; RV32-NEXT: addi sp, s0, -384 +; RV32-NEXT: lw ra, 380(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 376(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 384 ; RV32-NEXT: ret ; ; RV64-LABEL: extractelt_v256i1: ; RV64: # %bb.0: -; RV64-NEXT: addi sp, sp, -512 -; RV64-NEXT: sd ra, 504(sp) # 8-byte Folded Spill -; RV64-NEXT: sd s0, 496(sp) # 8-byte Folded Spill -; RV64-NEXT: addi s0, sp, 512 +; RV64-NEXT: addi sp, sp, -384 +; RV64-NEXT: sd ra, 376(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; RV64-NEXT: addi s0, sp, 384 ; RV64-NEXT: andi sp, sp, -128 ; RV64-NEXT: andi a1, a1, 255 +; RV64-NEXT: li a2, 128 +; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; RV64-NEXT: addi a2, a0, 128 -; RV64-NEXT: li a3, 128 -; RV64-NEXT: vsetvli zero, a3, e8, m8, ta, mu -; RV64-NEXT: vle8.v v16, (a0) -; RV64-NEXT: vle8.v v24, (a2) -; RV64-NEXT: addi a0, sp, 128 -; RV64-NEXT: add a0, a0, a1 +; RV64-NEXT: vle8.v v16, (a2) +; RV64-NEXT: vle8.v v24, (a0) +; RV64-NEXT: mv a0, sp +; RV64-NEXT: add a1, a0, a1 ; RV64-NEXT: vmseq.vi v8, v16, 0 ; RV64-NEXT: vmseq.vi v0, v24, 0 ; RV64-NEXT: vmv.v.i v16, 0 ; RV64-NEXT: vmerge.vim v24, v16, 1, v0 -; RV64-NEXT: addi a1, sp, 256 -; RV64-NEXT: vse8.v v24, (a1) +; RV64-NEXT: vse8.v v24, (a0) ; RV64-NEXT: vmv1r.v v0, v8 ; RV64-NEXT: vmerge.vim v8, v16, 1, v0 -; RV64-NEXT: addi a1, sp, 128 -; RV64-NEXT: vse8.v v8, (a1) -; RV64-NEXT: lb a0, 0(a0) -; RV64-NEXT: addi sp, s0, -512 -; RV64-NEXT: ld ra, 504(sp) # 8-byte Folded Reload -; RV64-NEXT: ld s0, 496(sp) # 8-byte Folded Reload -; RV64-NEXT: addi sp, sp, 512 +; RV64-NEXT: addi a0, sp, 128 +; RV64-NEXT: vse8.v v8, (a0) +; RV64-NEXT: lb a0, 0(a1) +; RV64-NEXT: addi sp, s0, -384 +; RV64-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; RV64-NEXT: addi sp, sp, 384 ; RV64-NEXT: ret ; ; RV32ZBS-LABEL: extractelt_v256i1: ; RV32ZBS: # %bb.0: -; RV32ZBS-NEXT: addi sp, sp, -512 -; RV32ZBS-NEXT: sw ra, 508(sp) # 4-byte Folded Spill -; RV32ZBS-NEXT: sw s0, 504(sp) # 4-byte Folded Spill -; RV32ZBS-NEXT: addi s0, sp, 512 +; RV32ZBS-NEXT: addi sp, sp, -384 +; RV32ZBS-NEXT: sw ra, 380(sp) 
# 4-byte Folded Spill +; RV32ZBS-NEXT: sw s0, 376(sp) # 4-byte Folded Spill +; RV32ZBS-NEXT: addi s0, sp, 384 ; RV32ZBS-NEXT: andi sp, sp, -128 ; RV32ZBS-NEXT: andi a1, a1, 255 +; RV32ZBS-NEXT: li a2, 128 +; RV32ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; RV32ZBS-NEXT: addi a2, a0, 128 -; RV32ZBS-NEXT: li a3, 128 -; RV32ZBS-NEXT: vsetvli zero, a3, e8, m8, ta, mu -; RV32ZBS-NEXT: vle8.v v16, (a0) -; RV32ZBS-NEXT: vle8.v v24, (a2) -; RV32ZBS-NEXT: addi a0, sp, 128 -; RV32ZBS-NEXT: add a0, a0, a1 +; RV32ZBS-NEXT: vle8.v v16, (a2) +; RV32ZBS-NEXT: vle8.v v24, (a0) +; RV32ZBS-NEXT: mv a0, sp +; RV32ZBS-NEXT: add a1, a0, a1 ; RV32ZBS-NEXT: vmseq.vi v8, v16, 0 ; RV32ZBS-NEXT: vmseq.vi v0, v24, 0 ; RV32ZBS-NEXT: vmv.v.i v16, 0 ; RV32ZBS-NEXT: vmerge.vim v24, v16, 1, v0 -; RV32ZBS-NEXT: addi a1, sp, 256 -; RV32ZBS-NEXT: vse8.v v24, (a1) +; RV32ZBS-NEXT: vse8.v v24, (a0) ; RV32ZBS-NEXT: vmv1r.v v0, v8 ; RV32ZBS-NEXT: vmerge.vim v8, v16, 1, v0 -; RV32ZBS-NEXT: addi a1, sp, 128 -; RV32ZBS-NEXT: vse8.v v8, (a1) -; RV32ZBS-NEXT: lb a0, 0(a0) -; RV32ZBS-NEXT: addi sp, s0, -512 -; RV32ZBS-NEXT: lw ra, 508(sp) # 4-byte Folded Reload -; RV32ZBS-NEXT: lw s0, 504(sp) # 4-byte Folded Reload -; RV32ZBS-NEXT: addi sp, sp, 512 +; RV32ZBS-NEXT: addi a0, sp, 128 +; RV32ZBS-NEXT: vse8.v v8, (a0) +; RV32ZBS-NEXT: lb a0, 0(a1) +; RV32ZBS-NEXT: addi sp, s0, -384 +; RV32ZBS-NEXT: lw ra, 380(sp) # 4-byte Folded Reload +; RV32ZBS-NEXT: lw s0, 376(sp) # 4-byte Folded Reload +; RV32ZBS-NEXT: addi sp, sp, 384 ; RV32ZBS-NEXT: ret ; ; RV64ZBS-LABEL: extractelt_v256i1: ; RV64ZBS: # %bb.0: -; RV64ZBS-NEXT: addi sp, sp, -512 -; RV64ZBS-NEXT: sd ra, 504(sp) # 8-byte Folded Spill -; RV64ZBS-NEXT: sd s0, 496(sp) # 8-byte Folded Spill -; RV64ZBS-NEXT: addi s0, sp, 512 +; RV64ZBS-NEXT: addi sp, sp, -384 +; RV64ZBS-NEXT: sd ra, 376(sp) # 8-byte Folded Spill +; RV64ZBS-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; RV64ZBS-NEXT: addi s0, sp, 384 ; RV64ZBS-NEXT: andi sp, sp, -128 ; RV64ZBS-NEXT: andi a1, a1, 255 +; RV64ZBS-NEXT: li a2, 128 +; RV64ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; RV64ZBS-NEXT: addi a2, a0, 128 -; RV64ZBS-NEXT: li a3, 128 -; RV64ZBS-NEXT: vsetvli zero, a3, e8, m8, ta, mu -; RV64ZBS-NEXT: vle8.v v16, (a0) -; RV64ZBS-NEXT: vle8.v v24, (a2) -; RV64ZBS-NEXT: addi a0, sp, 128 -; RV64ZBS-NEXT: add a0, a0, a1 +; RV64ZBS-NEXT: vle8.v v16, (a2) +; RV64ZBS-NEXT: vle8.v v24, (a0) +; RV64ZBS-NEXT: mv a0, sp +; RV64ZBS-NEXT: add a1, a0, a1 ; RV64ZBS-NEXT: vmseq.vi v8, v16, 0 ; RV64ZBS-NEXT: vmseq.vi v0, v24, 0 ; RV64ZBS-NEXT: vmv.v.i v16, 0 ; RV64ZBS-NEXT: vmerge.vim v24, v16, 1, v0 -; RV64ZBS-NEXT: addi a1, sp, 256 -; RV64ZBS-NEXT: vse8.v v24, (a1) +; RV64ZBS-NEXT: vse8.v v24, (a0) ; RV64ZBS-NEXT: vmv1r.v v0, v8 ; RV64ZBS-NEXT: vmerge.vim v8, v16, 1, v0 -; RV64ZBS-NEXT: addi a1, sp, 128 -; RV64ZBS-NEXT: vse8.v v8, (a1) -; RV64ZBS-NEXT: lb a0, 0(a0) -; RV64ZBS-NEXT: addi sp, s0, -512 -; RV64ZBS-NEXT: ld ra, 504(sp) # 8-byte Folded Reload -; RV64ZBS-NEXT: ld s0, 496(sp) # 8-byte Folded Reload -; RV64ZBS-NEXT: addi sp, sp, 512 +; RV64ZBS-NEXT: addi a0, sp, 128 +; RV64ZBS-NEXT: vse8.v v8, (a0) +; RV64ZBS-NEXT: lb a0, 0(a1) +; RV64ZBS-NEXT: addi sp, s0, -384 +; RV64ZBS-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; RV64ZBS-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; RV64ZBS-NEXT: addi sp, sp, 384 ; RV64ZBS-NEXT: ret %a = load <256 x i8>, <256 x i8>* %x %b = icmp eq <256 x i8> %a, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll 
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll @@ -53,23 +53,22 @@ ; ; LMULMAX2-LABEL: hang_when_merging_stores_after_legalization: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: addi sp, sp, -32 -; LMULMAX2-NEXT: .cfi_def_cfa_offset 32 -; LMULMAX2-NEXT: addi a0, sp, 24 +; LMULMAX2-NEXT: addi sp, sp, -16 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 16 +; LMULMAX2-NEXT: addi a0, sp, 8 ; LMULMAX2-NEXT: vsetivli zero, 1, e32, m2, ta, mu ; LMULMAX2-NEXT: vse32.v v10, (a0) -; LMULMAX2-NEXT: vslidedown.vi v10, v10, 7 -; LMULMAX2-NEXT: addi a0, sp, 28 -; LMULMAX2-NEXT: vse32.v v10, (a0) -; LMULMAX2-NEXT: vslidedown.vi v10, v8, 7 -; LMULMAX2-NEXT: addi a0, sp, 20 -; LMULMAX2-NEXT: vse32.v v10, (a0) -; LMULMAX2-NEXT: addi a0, sp, 16 +; LMULMAX2-NEXT: mv a0, sp ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: vslidedown.vi v10, v10, 7 +; LMULMAX2-NEXT: addi a1, sp, 12 +; LMULMAX2-NEXT: vse32.v v10, (a1) +; LMULMAX2-NEXT: vslidedown.vi v8, v8, 7 +; LMULMAX2-NEXT: addi a1, sp, 4 +; LMULMAX2-NEXT: vse32.v v8, (a1) ; LMULMAX2-NEXT: vsetivli zero, 4, e32, m1, ta, mu -; LMULMAX2-NEXT: addi a0, sp, 16 ; LMULMAX2-NEXT: vle32.v v8, (a0) -; LMULMAX2-NEXT: addi sp, sp, 32 +; LMULMAX2-NEXT: addi sp, sp, 16 ; LMULMAX2-NEXT: ret %z = shufflevector <8 x float> %x, <8 x float> %y, <4 x i32> ret <4 x float> %z @@ -246,20 +245,19 @@ define <8 x float> @splat_idx_v8f32(<8 x float> %v, i64 %idx) { ; LMULMAX1-LABEL: splat_idx_v8f32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: addi sp, sp, -48 -; LMULMAX1-NEXT: .cfi_def_cfa_offset 48 +; LMULMAX1-NEXT: addi sp, sp, -32 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 32 ; LMULMAX1-NEXT: andi a0, a0, 7 ; LMULMAX1-NEXT: slli a0, a0, 2 -; LMULMAX1-NEXT: addi a1, sp, 16 +; LMULMAX1-NEXT: mv a1, sp ; LMULMAX1-NEXT: add a0, a1, a0 -; LMULMAX1-NEXT: addi a1, sp, 32 +; LMULMAX1-NEXT: addi a2, sp, 16 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu -; LMULMAX1-NEXT: vse32.v v9, (a1) -; LMULMAX1-NEXT: addi a1, sp, 16 +; LMULMAX1-NEXT: vse32.v v9, (a2) ; LMULMAX1-NEXT: vse32.v v8, (a1) ; LMULMAX1-NEXT: vlse32.v v8, (a0), zero ; LMULMAX1-NEXT: vmv.v.v v9, v8 -; LMULMAX1-NEXT: addi sp, sp, 48 +; LMULMAX1-NEXT: addi sp, sp, 32 ; LMULMAX1-NEXT: ret ; ; LMULMAX2-LABEL: splat_idx_v8f32: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll @@ -574,9 +574,8 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: addi a2, sp, 64 -; CHECK-NEXT: add a2, a2, a0 -; CHECK-NEXT: vl8re64.v v8, (a2) -; CHECK-NEXT: addi a2, sp, 64 +; CHECK-NEXT: add a3, a2, a0 +; CHECK-NEXT: vl8re64.v v8, (a3) ; CHECK-NEXT: vl8re64.v v16, (a2) ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: vs8r.v v8, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll @@ -52,8 +52,8 @@ ; LMULMAX8-RV32-NEXT: .cfi_def_cfa_offset 16 ; LMULMAX8-RV32-NEXT: sw a2, 12(sp) ; LMULMAX8-RV32-NEXT: sw a1, 8(sp) -; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX8-RV32-NEXT: addi a1, sp, 8 +; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX8-RV32-NEXT: vlse64.v v8, (a1), zero ; LMULMAX8-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX8-RV32-NEXT: 
addi sp, sp, 16 @@ -65,8 +65,8 @@ ; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 16 ; LMULMAX2-RV32-NEXT: sw a2, 12(sp) ; LMULMAX2-RV32-NEXT: sw a1, 8(sp) -; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX2-RV32-NEXT: addi a1, sp, 8 +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX2-RV32-NEXT: vlse64.v v8, (a1), zero ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV32-NEXT: addi sp, sp, 16 @@ -78,8 +78,8 @@ ; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 16 ; LMULMAX1-RV32-NEXT: sw a2, 12(sp) ; LMULMAX1-RV32-NEXT: sw a1, 8(sp) -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-RV32-NEXT: addi a1, sp, 8 +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-RV32-NEXT: vlse64.v v8, (a1), zero ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi sp, sp, 16 @@ -207,8 +207,8 @@ ; LMULMAX8-RV32-NEXT: .cfi_def_cfa_offset 16 ; LMULMAX8-RV32-NEXT: sw a2, 12(sp) ; LMULMAX8-RV32-NEXT: sw a1, 8(sp) -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX8-RV32-NEXT: addi a1, sp, 8 +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX8-RV32-NEXT: vlse64.v v8, (a1), zero ; LMULMAX8-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX8-RV32-NEXT: addi sp, sp, 16 @@ -220,8 +220,8 @@ ; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 16 ; LMULMAX2-RV32-NEXT: sw a2, 12(sp) ; LMULMAX2-RV32-NEXT: sw a1, 8(sp) -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-RV32-NEXT: addi a1, sp, 8 +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-RV32-NEXT: vlse64.v v8, (a1), zero ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll @@ -85,8 +85,8 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: sb a1, 15(sp) ; CHECK-NEXT: sb a0, 14(sp) -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: addi a0, sp, 14 +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -99,8 +99,8 @@ ; ZVE32F-NEXT: .cfi_def_cfa_offset 16 ; ZVE32F-NEXT: sb a1, 15(sp) ; ZVE32F-NEXT: sb a0, 14(sp) -; ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; ZVE32F-NEXT: addi a0, sp, 14 +; ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; ZVE32F-NEXT: vle8.v v8, (a0) ; ZVE32F-NEXT: vand.vi v8, v8, 1 ; ZVE32F-NEXT: vmsne.vi v0, v8, 0 @@ -203,8 +203,8 @@ ; CHECK-NEXT: sb a1, 14(sp) ; CHECK-NEXT: sb a0, 13(sp) ; CHECK-NEXT: sb a0, 12(sp) -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: addi a0, sp, 12 +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -219,8 +219,8 @@ ; ZVE32F-NEXT: sb a1, 14(sp) ; ZVE32F-NEXT: sb a0, 13(sp) ; ZVE32F-NEXT: sb a0, 12(sp) -; ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; ZVE32F-NEXT: addi a0, sp, 12 +; ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; ZVE32F-NEXT: vle8.v v8, (a0) ; ZVE32F-NEXT: vand.vi v8, v8, 1 ; ZVE32F-NEXT: vmsne.vi v0, v8, 0 @@ -243,8 +243,8 @@ ; CHECK-NEXT: sb a1, 14(sp) ; CHECK-NEXT: sb a0, 13(sp) ; CHECK-NEXT: sb zero, 12(sp) -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: addi a0, sp, 12 +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; 
CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -260,8 +260,8 @@ ; ZVE32F-NEXT: sb a1, 14(sp) ; ZVE32F-NEXT: sb a0, 13(sp) ; ZVE32F-NEXT: sb zero, 12(sp) -; ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; ZVE32F-NEXT: addi a0, sp, 12 +; ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; ZVE32F-NEXT: vle8.v v8, (a0) ; ZVE32F-NEXT: vand.vi v8, v8, 1 ; ZVE32F-NEXT: vmsne.vi v0, v8, 0 @@ -340,8 +340,8 @@ ; CHECK-NEXT: sb a1, 10(sp) ; CHECK-NEXT: sb a0, 9(sp) ; CHECK-NEXT: sb a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -361,8 +361,8 @@ ; ZVE32F-NEXT: sb a1, 10(sp) ; ZVE32F-NEXT: sb a0, 9(sp) ; ZVE32F-NEXT: sb a0, 8(sp) -; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; ZVE32F-NEXT: addi a0, sp, 8 +; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; ZVE32F-NEXT: vle8.v v8, (a0) ; ZVE32F-NEXT: vand.vi v8, v8, 1 ; ZVE32F-NEXT: vmsne.vi v0, v8, 0 @@ -393,8 +393,8 @@ ; CHECK-NEXT: sb a1, 10(sp) ; CHECK-NEXT: sb a0, 9(sp) ; CHECK-NEXT: sb a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -414,8 +414,8 @@ ; ZVE32F-NEXT: sb a1, 10(sp) ; ZVE32F-NEXT: sb a0, 9(sp) ; ZVE32F-NEXT: sb a0, 8(sp) -; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; ZVE32F-NEXT: addi a0, sp, 8 +; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; ZVE32F-NEXT: vle8.v v8, (a0) ; ZVE32F-NEXT: vand.vi v8, v8, 1 ; ZVE32F-NEXT: vmsne.vi v0, v8, 0 @@ -445,8 +445,8 @@ ; CHECK-NEXT: sb a1, 10(sp) ; CHECK-NEXT: sb a0, 9(sp) ; CHECK-NEXT: sb a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -465,8 +465,8 @@ ; ZVE32F-NEXT: sb a1, 10(sp) ; ZVE32F-NEXT: sb a0, 9(sp) ; ZVE32F-NEXT: sb a0, 8(sp) -; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; ZVE32F-NEXT: addi a0, sp, 8 +; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; ZVE32F-NEXT: vle8.v v8, (a0) ; ZVE32F-NEXT: vand.vi v8, v8, 1 ; ZVE32F-NEXT: vmsne.vi v0, v8, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll @@ -7246,20 +7246,20 @@ ; ; RV32ZVE32F-LABEL: mgather_baseidx_v8i64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: addi sp, sp, -128 -; RV32ZVE32F-NEXT: .cfi_def_cfa_offset 128 -; RV32ZVE32F-NEXT: sw ra, 124(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s0, 120(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s2, 116(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s3, 112(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s4, 108(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s5, 104(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s6, 100(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s7, 96(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s8, 92(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s9, 88(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s10, 84(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s11, 80(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: addi sp, sp, -96 +; RV32ZVE32F-NEXT: .cfi_def_cfa_offset 96 +; 
RV32ZVE32F-NEXT: sw ra, 92(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s0, 88(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s2, 84(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s3, 80(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s4, 76(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s5, 72(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s6, 68(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s7, 64(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s8, 60(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s9, 56(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s10, 52(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s11, 48(sp) # 4-byte Folded Spill ; RV32ZVE32F-NEXT: .cfi_offset ra, -4 ; RV32ZVE32F-NEXT: .cfi_offset s0, -8 ; RV32ZVE32F-NEXT: .cfi_offset s2, -12 @@ -7272,7 +7272,7 @@ ; RV32ZVE32F-NEXT: .cfi_offset s9, -40 ; RV32ZVE32F-NEXT: .cfi_offset s10, -44 ; RV32ZVE32F-NEXT: .cfi_offset s11, -48 -; RV32ZVE32F-NEXT: addi s0, sp, 128 +; RV32ZVE32F-NEXT: addi s0, sp, 96 ; RV32ZVE32F-NEXT: .cfi_def_cfa s0, 0 ; RV32ZVE32F-NEXT: andi sp, sp, -32 ; RV32ZVE32F-NEXT: lw a4, 60(a3) @@ -7299,16 +7299,16 @@ ; RV32ZVE32F-NEXT: lw s11, 48(a2) ; RV32ZVE32F-NEXT: lw ra, 40(a2) ; RV32ZVE32F-NEXT: lw a2, 32(a2) -; RV32ZVE32F-NEXT: sw s10, 60(sp) -; RV32ZVE32F-NEXT: sw s11, 56(sp) -; RV32ZVE32F-NEXT: sw ra, 52(sp) -; RV32ZVE32F-NEXT: sw a2, 48(sp) -; RV32ZVE32F-NEXT: sw s9, 44(sp) -; RV32ZVE32F-NEXT: sw s8, 40(sp) -; RV32ZVE32F-NEXT: sw s7, 36(sp) -; RV32ZVE32F-NEXT: sw s6, 32(sp) +; RV32ZVE32F-NEXT: sw s10, 28(sp) +; RV32ZVE32F-NEXT: sw s11, 24(sp) +; RV32ZVE32F-NEXT: sw ra, 20(sp) +; RV32ZVE32F-NEXT: sw a2, 16(sp) +; RV32ZVE32F-NEXT: sw s9, 12(sp) +; RV32ZVE32F-NEXT: sw s8, 8(sp) +; RV32ZVE32F-NEXT: sw s7, 4(sp) +; RV32ZVE32F-NEXT: sw s6, 0(sp) +; RV32ZVE32F-NEXT: mv a2, sp ; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; RV32ZVE32F-NEXT: addi a2, sp, 32 ; RV32ZVE32F-NEXT: vle32.v v8, (a2) ; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 @@ -7360,20 +7360,20 @@ ; RV32ZVE32F-NEXT: sw a6, 52(a0) ; RV32ZVE32F-NEXT: sw a5, 56(a0) ; RV32ZVE32F-NEXT: sw a4, 60(a0) -; RV32ZVE32F-NEXT: addi sp, s0, -128 -; RV32ZVE32F-NEXT: lw ra, 124(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s0, 120(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s2, 116(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s3, 112(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s4, 108(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s5, 104(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s6, 100(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s7, 96(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s8, 92(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s9, 88(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s10, 84(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s11, 80(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: addi sp, sp, 128 +; RV32ZVE32F-NEXT: addi sp, s0, -96 +; RV32ZVE32F-NEXT: lw ra, 92(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s0, 88(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s2, 84(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s3, 80(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s4, 76(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s5, 72(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s6, 68(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s7, 64(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s8, 60(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s9, 56(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s10, 52(sp) # 4-byte Folded 
Reload +; RV32ZVE32F-NEXT: lw s11, 48(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: addi sp, sp, 96 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB57_10: # %cond.load ; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu @@ -12697,13 +12697,13 @@ ; ; RV32ZVE32F-LABEL: mgather_baseidx_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: addi sp, sp, -96 -; RV32ZVE32F-NEXT: .cfi_def_cfa_offset 96 -; RV32ZVE32F-NEXT: sw ra, 92(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s0, 88(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: addi sp, sp, -64 +; RV32ZVE32F-NEXT: .cfi_def_cfa_offset 64 +; RV32ZVE32F-NEXT: sw ra, 60(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s0, 56(sp) # 4-byte Folded Spill ; RV32ZVE32F-NEXT: .cfi_offset ra, -4 ; RV32ZVE32F-NEXT: .cfi_offset s0, -8 -; RV32ZVE32F-NEXT: addi s0, sp, 96 +; RV32ZVE32F-NEXT: addi s0, sp, 64 ; RV32ZVE32F-NEXT: .cfi_def_cfa s0, 0 ; RV32ZVE32F-NEXT: andi sp, sp, -32 ; RV32ZVE32F-NEXT: lw a3, 0(a2) @@ -12714,16 +12714,16 @@ ; RV32ZVE32F-NEXT: lw t0, 48(a2) ; RV32ZVE32F-NEXT: lw t1, 40(a2) ; RV32ZVE32F-NEXT: lw a2, 32(a2) -; RV32ZVE32F-NEXT: sw a7, 60(sp) -; RV32ZVE32F-NEXT: sw t0, 56(sp) -; RV32ZVE32F-NEXT: sw t1, 52(sp) -; RV32ZVE32F-NEXT: sw a2, 48(sp) -; RV32ZVE32F-NEXT: sw a6, 44(sp) -; RV32ZVE32F-NEXT: sw a5, 40(sp) -; RV32ZVE32F-NEXT: sw a4, 36(sp) -; RV32ZVE32F-NEXT: sw a3, 32(sp) +; RV32ZVE32F-NEXT: sw a7, 28(sp) +; RV32ZVE32F-NEXT: sw t0, 24(sp) +; RV32ZVE32F-NEXT: sw t1, 20(sp) +; RV32ZVE32F-NEXT: sw a2, 16(sp) +; RV32ZVE32F-NEXT: sw a6, 12(sp) +; RV32ZVE32F-NEXT: sw a5, 8(sp) +; RV32ZVE32F-NEXT: sw a4, 4(sp) +; RV32ZVE32F-NEXT: sw a3, 0(sp) +; RV32ZVE32F-NEXT: mv a2, sp ; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; RV32ZVE32F-NEXT: addi a2, sp, 32 ; RV32ZVE32F-NEXT: vle32.v v8, (a2) ; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 @@ -12766,10 +12766,10 @@ ; RV32ZVE32F-NEXT: fsd fa5, 40(a0) ; RV32ZVE32F-NEXT: fsd fa6, 48(a0) ; RV32ZVE32F-NEXT: fsd fa7, 56(a0) -; RV32ZVE32F-NEXT: addi sp, s0, -96 -; RV32ZVE32F-NEXT: lw ra, 92(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s0, 88(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: addi sp, sp, 96 +; RV32ZVE32F-NEXT: addi sp, s0, -64 +; RV32ZVE32F-NEXT: lw ra, 60(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s0, 56(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: addi sp, sp, 64 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB96_10: # %cond.load ; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll @@ -273,8 +273,8 @@ ; RV32ZVE32F-NEXT: lw a0, 0(a0) ; RV32ZVE32F-NEXT: sb a1, 15(sp) ; RV32ZVE32F-NEXT: sb a0, 14(sp) -; RV32ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu ; RV32ZVE32F-NEXT: addi a0, sp, 15 +; RV32ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu ; RV32ZVE32F-NEXT: vle8.v v9, (a0) ; RV32ZVE32F-NEXT: addi a0, sp, 14 ; RV32ZVE32F-NEXT: vle8.v v10, (a0) @@ -302,8 +302,8 @@ ; RV64ZVE32F-NEXT: lbu a4, 15(sp) ; RV64ZVE32F-NEXT: sb a1, 14(sp) ; RV64ZVE32F-NEXT: sb a0, 13(sp) -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu ; RV64ZVE32F-NEXT: addi a0, sp, 14 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu ; RV64ZVE32F-NEXT: vle8.v v9, (a0) ; RV64ZVE32F-NEXT: addi a0, sp, 13 ; RV64ZVE32F-NEXT: vle8.v v8, (a0) @@ -905,8 +905,8 @@ ; RV32ZVE32F-NEXT: lw a0, 0(a0) ; RV32ZVE32F-NEXT: sh a1, 
14(sp) ; RV32ZVE32F-NEXT: sh a0, 12(sp) -; RV32ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu ; RV32ZVE32F-NEXT: addi a0, sp, 14 +; RV32ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu ; RV32ZVE32F-NEXT: vle16.v v9, (a0) ; RV32ZVE32F-NEXT: addi a0, sp, 12 ; RV32ZVE32F-NEXT: vle16.v v10, (a0) @@ -934,8 +934,8 @@ ; RV64ZVE32F-NEXT: lbu a4, 15(sp) ; RV64ZVE32F-NEXT: sh a1, 12(sp) ; RV64ZVE32F-NEXT: sh a0, 10(sp) -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu ; RV64ZVE32F-NEXT: addi a0, sp, 12 +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu ; RV64ZVE32F-NEXT: vle16.v v9, (a0) ; RV64ZVE32F-NEXT: addi a0, sp, 10 ; RV64ZVE32F-NEXT: vle16.v v8, (a0) @@ -1888,8 +1888,8 @@ ; ; RV64ZVE32F-LABEL: mscatter_v2i64_truncstore_v2i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: addi sp, sp, -32 -; RV64ZVE32F-NEXT: .cfi_def_cfa_offset 32 +; RV64ZVE32F-NEXT: addi sp, sp, -16 +; RV64ZVE32F-NEXT: .cfi_def_cfa_offset 16 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV64ZVE32F-NEXT: vmv.v.i v8, 0 ; RV64ZVE32F-NEXT: vmerge.vim v8, v8, 1, v0 @@ -1899,15 +1899,15 @@ ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 0 ; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64ZVE32F-NEXT: vmsne.vi v8, v9, 0 -; RV64ZVE32F-NEXT: addi a4, sp, 31 +; RV64ZVE32F-NEXT: addi a4, sp, 15 ; RV64ZVE32F-NEXT: vsm.v v8, (a4) -; RV64ZVE32F-NEXT: lbu a4, 31(sp) -; RV64ZVE32F-NEXT: sw a1, 24(sp) -; RV64ZVE32F-NEXT: sw a0, 20(sp) +; RV64ZVE32F-NEXT: lbu a4, 15(sp) +; RV64ZVE32F-NEXT: sw a1, 8(sp) +; RV64ZVE32F-NEXT: sw a0, 4(sp) +; RV64ZVE32F-NEXT: addi a0, sp, 8 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV64ZVE32F-NEXT: addi a0, sp, 24 ; RV64ZVE32F-NEXT: vle32.v v9, (a0) -; RV64ZVE32F-NEXT: addi a0, sp, 20 +; RV64ZVE32F-NEXT: addi a0, sp, 4 ; RV64ZVE32F-NEXT: vle32.v v8, (a0) ; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, mu ; RV64ZVE32F-NEXT: andi a0, a4, 1 @@ -1917,7 +1917,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 2 ; RV64ZVE32F-NEXT: bnez a0, .LBB24_4 ; RV64ZVE32F-NEXT: .LBB24_2: # %else2 -; RV64ZVE32F-NEXT: addi sp, sp, 32 +; RV64ZVE32F-NEXT: addi sp, sp, 16 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB24_3: # %cond.store ; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu @@ -1928,7 +1928,7 @@ ; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v8, (a3) -; RV64ZVE32F-NEXT: addi sp, sp, 32 +; RV64ZVE32F-NEXT: addi sp, sp, 16 ; RV64ZVE32F-NEXT: ret %tval = trunc <2 x i64> %val to <2 x i32> call void @llvm.masked.scatter.v2i32.v2p0i32(<2 x i32> %tval, <2 x i32*> %ptrs, i32 4, <2 x i1> %m) @@ -6092,20 +6092,20 @@ ; ; RV32ZVE32F-LABEL: mscatter_baseidx_v8i64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: addi sp, sp, -128 -; RV32ZVE32F-NEXT: .cfi_def_cfa_offset 128 -; RV32ZVE32F-NEXT: sw ra, 124(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s0, 120(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s2, 116(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s3, 112(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s4, 108(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s5, 104(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s6, 100(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s7, 96(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s8, 92(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s9, 88(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s10, 84(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s11, 80(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: addi sp, sp, -96 +; RV32ZVE32F-NEXT: .cfi_def_cfa_offset 96 +; RV32ZVE32F-NEXT: sw ra, 
92(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s0, 88(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s2, 84(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s3, 80(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s4, 76(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s5, 72(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s6, 68(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s7, 64(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s8, 60(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s9, 56(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s10, 52(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s11, 48(sp) # 4-byte Folded Spill ; RV32ZVE32F-NEXT: .cfi_offset ra, -4 ; RV32ZVE32F-NEXT: .cfi_offset s0, -8 ; RV32ZVE32F-NEXT: .cfi_offset s2, -12 @@ -6118,7 +6118,7 @@ ; RV32ZVE32F-NEXT: .cfi_offset s9, -40 ; RV32ZVE32F-NEXT: .cfi_offset s10, -44 ; RV32ZVE32F-NEXT: .cfi_offset s11, -48 -; RV32ZVE32F-NEXT: addi s0, sp, 128 +; RV32ZVE32F-NEXT: addi s0, sp, 96 ; RV32ZVE32F-NEXT: .cfi_def_cfa s0, 0 ; RV32ZVE32F-NEXT: andi sp, sp, -32 ; RV32ZVE32F-NEXT: lw a3, 60(a0) @@ -6145,16 +6145,16 @@ ; RV32ZVE32F-NEXT: lw s10, 48(a2) ; RV32ZVE32F-NEXT: lw s11, 40(a2) ; RV32ZVE32F-NEXT: lw a2, 32(a2) -; RV32ZVE32F-NEXT: sw s9, 60(sp) -; RV32ZVE32F-NEXT: sw s10, 56(sp) -; RV32ZVE32F-NEXT: sw s11, 52(sp) -; RV32ZVE32F-NEXT: sw a2, 48(sp) -; RV32ZVE32F-NEXT: sw s8, 44(sp) -; RV32ZVE32F-NEXT: sw s7, 40(sp) -; RV32ZVE32F-NEXT: sw s6, 36(sp) -; RV32ZVE32F-NEXT: sw a0, 32(sp) +; RV32ZVE32F-NEXT: sw s9, 28(sp) +; RV32ZVE32F-NEXT: sw s10, 24(sp) +; RV32ZVE32F-NEXT: sw s11, 20(sp) +; RV32ZVE32F-NEXT: sw a2, 16(sp) +; RV32ZVE32F-NEXT: sw s8, 12(sp) +; RV32ZVE32F-NEXT: sw s7, 8(sp) +; RV32ZVE32F-NEXT: sw s6, 4(sp) +; RV32ZVE32F-NEXT: sw a0, 0(sp) +; RV32ZVE32F-NEXT: mv a0, sp ; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; RV32ZVE32F-NEXT: addi a0, sp, 32 ; RV32ZVE32F-NEXT: vle32.v v8, (a0) ; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 @@ -6190,20 +6190,20 @@ ; RV32ZVE32F-NEXT: sw a4, 0(a0) ; RV32ZVE32F-NEXT: sw a3, 4(a0) ; RV32ZVE32F-NEXT: .LBB51_9: # %else14 -; RV32ZVE32F-NEXT: addi sp, s0, -128 -; RV32ZVE32F-NEXT: lw ra, 124(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s0, 120(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s2, 116(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s3, 112(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s4, 108(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s5, 104(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s6, 100(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s7, 96(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s8, 92(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s9, 88(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s10, 84(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s11, 80(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: addi sp, sp, 128 +; RV32ZVE32F-NEXT: addi sp, s0, -96 +; RV32ZVE32F-NEXT: lw ra, 92(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s0, 88(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s2, 84(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s3, 80(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s4, 76(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s5, 72(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s6, 68(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s7, 64(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s8, 60(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s9, 56(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s10, 52(sp) # 4-byte Folded Reload +; 
RV32ZVE32F-NEXT: lw s11, 48(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: addi sp, sp, 96 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB51_10: # %cond.store ; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu @@ -11033,13 +11033,13 @@ ; ; RV32ZVE32F-LABEL: mscatter_baseidx_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: addi sp, sp, -96 -; RV32ZVE32F-NEXT: .cfi_def_cfa_offset 96 -; RV32ZVE32F-NEXT: sw ra, 92(sp) # 4-byte Folded Spill -; RV32ZVE32F-NEXT: sw s0, 88(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: addi sp, sp, -64 +; RV32ZVE32F-NEXT: .cfi_def_cfa_offset 64 +; RV32ZVE32F-NEXT: sw ra, 60(sp) # 4-byte Folded Spill +; RV32ZVE32F-NEXT: sw s0, 56(sp) # 4-byte Folded Spill ; RV32ZVE32F-NEXT: .cfi_offset ra, -4 ; RV32ZVE32F-NEXT: .cfi_offset s0, -8 -; RV32ZVE32F-NEXT: addi s0, sp, 96 +; RV32ZVE32F-NEXT: addi s0, sp, 64 ; RV32ZVE32F-NEXT: .cfi_def_cfa s0, 0 ; RV32ZVE32F-NEXT: andi sp, sp, -32 ; RV32ZVE32F-NEXT: lw a2, 0(a1) @@ -11050,16 +11050,16 @@ ; RV32ZVE32F-NEXT: lw a7, 48(a1) ; RV32ZVE32F-NEXT: lw t0, 40(a1) ; RV32ZVE32F-NEXT: lw a1, 32(a1) -; RV32ZVE32F-NEXT: sw a6, 60(sp) -; RV32ZVE32F-NEXT: sw a7, 56(sp) -; RV32ZVE32F-NEXT: sw t0, 52(sp) -; RV32ZVE32F-NEXT: sw a1, 48(sp) -; RV32ZVE32F-NEXT: sw a5, 44(sp) -; RV32ZVE32F-NEXT: sw a4, 40(sp) -; RV32ZVE32F-NEXT: sw a3, 36(sp) -; RV32ZVE32F-NEXT: sw a2, 32(sp) +; RV32ZVE32F-NEXT: sw a6, 28(sp) +; RV32ZVE32F-NEXT: sw a7, 24(sp) +; RV32ZVE32F-NEXT: sw t0, 20(sp) +; RV32ZVE32F-NEXT: sw a1, 16(sp) +; RV32ZVE32F-NEXT: sw a5, 12(sp) +; RV32ZVE32F-NEXT: sw a4, 8(sp) +; RV32ZVE32F-NEXT: sw a3, 4(sp) +; RV32ZVE32F-NEXT: sw a2, 0(sp) +; RV32ZVE32F-NEXT: mv a1, sp ; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; RV32ZVE32F-NEXT: addi a1, sp, 32 ; RV32ZVE32F-NEXT: vle32.v v8, (a1) ; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0 @@ -11094,10 +11094,10 @@ ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: fsd fa7, 0(a0) ; RV32ZVE32F-NEXT: .LBB90_9: # %else14 -; RV32ZVE32F-NEXT: addi sp, s0, -96 -; RV32ZVE32F-NEXT: lw ra, 92(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: lw s0, 88(sp) # 4-byte Folded Reload -; RV32ZVE32F-NEXT: addi sp, sp, 96 +; RV32ZVE32F-NEXT: addi sp, s0, -64 +; RV32ZVE32F-NEXT: lw ra, 60(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: lw s0, 56(sp) # 4-byte Folded Reload +; RV32ZVE32F-NEXT: addi sp, sp, 64 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB90_10: # %cond.store ; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll @@ -883,8 +883,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredsum.vs v9, v8, v9, v0.t @@ -917,8 +917,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t @@ -951,8 +951,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: 
sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredmax.vs v9, v8, v9, v0.t @@ -985,8 +985,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t @@ -1019,8 +1019,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredmin.vs v9, v8, v9, v0.t @@ -1053,8 +1053,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredand.vs v9, v8, v9, v0.t @@ -1087,8 +1087,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredor.vs v9, v8, v9, v0.t @@ -1121,8 +1121,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredxor.vs v9, v8, v9, v0.t @@ -1155,8 +1155,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredsum.vs v10, v8, v10, v0.t @@ -1189,8 +1189,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredmaxu.vs v10, v8, v10, v0.t @@ -1223,8 +1223,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredmax.vs v10, v8, v10, v0.t @@ -1257,8 +1257,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredminu.vs v10, v8, v10, v0.t @@ -1291,8 +1291,8 @@ ; RV32-NEXT: 
.cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredmin.vs v10, v8, v10, v0.t @@ -1325,8 +1325,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredand.vs v10, v8, v10, v0.t @@ -1359,8 +1359,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredor.vs v10, v8, v10, v0.t @@ -1393,8 +1393,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredxor.vs v10, v8, v10, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll @@ -4468,8 +4468,8 @@ ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: sw a0, 12(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v9 @@ -4510,8 +4510,8 @@ ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: sw a0, 12(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v10 @@ -4552,8 +4552,8 @@ ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: sw a0, 12(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v12 @@ -4594,8 +4594,8 @@ ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: sw a0, 12(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v16 @@ -4639,8 +4639,8 @@ ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: vmin.vv v8, v8, v16 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v16 @@ -4692,8 +4692,8 @@ ; RV32-NEXT: vmin.vv v24, v0, v24 ; RV32-NEXT: vmin.vv v8, v8, v16 ; RV32-NEXT: vmin.vv v8, 
v8, v24 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v16 @@ -5200,8 +5200,8 @@ ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw zero, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vredmax.vs v8, v8, v9 @@ -5240,8 +5240,8 @@ ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw zero, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vredmax.vs v8, v8, v10 @@ -5280,8 +5280,8 @@ ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw zero, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vredmax.vs v8, v8, v12 @@ -5320,8 +5320,8 @@ ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw zero, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredmax.vs v8, v8, v16 @@ -5363,8 +5363,8 @@ ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw zero, 8(sp) ; RV32-NEXT: vmax.vv v8, v8, v16 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredmax.vs v8, v8, v16 @@ -5414,8 +5414,8 @@ ; RV32-NEXT: vmax.vv v24, v0, v24 ; RV32-NEXT: vmax.vv v8, v8, v16 ; RV32-NEXT: vmax.vv v8, v8, v24 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredmax.vs v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll @@ -1464,8 +1464,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmseq.vv v12, v8, v16, v0.t @@ -1492,8 +1492,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmseq.vv v12, v16, v8, v0.t @@ -1557,8 +1557,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; 
RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsne.vv v12, v8, v16, v0.t @@ -1585,8 +1585,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsne.vv v12, v16, v8, v0.t @@ -1650,8 +1650,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsltu.vv v12, v16, v8, v0.t @@ -1678,8 +1678,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsltu.vv v12, v8, v16, v0.t @@ -1743,8 +1743,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsleu.vv v12, v16, v8, v0.t @@ -1773,8 +1773,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsleu.vv v12, v8, v16, v0.t @@ -1838,8 +1838,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsltu.vv v12, v8, v16, v0.t @@ -1866,8 +1866,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsltu.vv v12, v16, v8, v0.t @@ -1931,8 +1931,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmslt.vv v12, v16, v8, v0.t @@ -1959,8 +1959,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmslt.vv v12, v8, v16, v0.t @@ -2024,8 +2024,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: 
vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsle.vv v12, v16, v8, v0.t @@ -2054,8 +2054,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsle.vv v12, v8, v16, v0.t @@ -2119,8 +2119,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmslt.vv v12, v8, v16, v0.t @@ -2147,8 +2147,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmslt.vv v12, v16, v8, v0.t @@ -2212,8 +2212,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsle.vv v12, v8, v16, v0.t @@ -2240,8 +2240,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsle.vv v12, v16, v8, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll @@ -1140,8 +1140,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v9, v0.t @@ -1166,8 +1166,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v9 @@ -1244,8 +1244,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v10, v0.t @@ -1270,8 +1270,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: 
vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v10 @@ -1348,8 +1348,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v12, v0.t @@ -1374,8 +1374,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v12 @@ -1452,8 +1452,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v16, v0.t @@ -1478,8 +1478,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll @@ -985,8 +985,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vand.vv v8, v8, v9, v0.t @@ -1011,8 +1011,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vand.vv v8, v8, v9 @@ -1089,8 +1089,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vand.vv v8, v8, v10, v0.t @@ -1115,8 +1115,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vand.vv v8, v8, v10 @@ -1193,8 +1193,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, 
mu ; RV32-NEXT: vand.vv v8, v8, v12, v0.t @@ -1219,8 +1219,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vand.vv v8, v8, v12 @@ -1405,8 +1405,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vand.vv v8, v8, v16, v0.t @@ -1431,8 +1431,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vand.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll @@ -664,8 +664,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v9, v0.t @@ -690,8 +690,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v9 @@ -742,8 +742,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v10, v0.t @@ -768,8 +768,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v10 @@ -820,8 +820,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v12, v0.t @@ -846,8 +846,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v12 @@ -898,8 +898,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; 
RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v16, v0.t @@ -924,8 +924,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll @@ -663,8 +663,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v9, v0.t @@ -689,8 +689,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v9 @@ -741,8 +741,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v10, v0.t @@ -767,8 +767,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v10 @@ -819,8 +819,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v12, v0.t @@ -845,8 +845,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v12 @@ -897,8 +897,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v16, v0.t @@ -923,8 +923,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; 
RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll @@ -709,8 +709,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v9, v0.t @@ -735,8 +735,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v9 @@ -787,8 +787,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v10, v0.t @@ -813,8 +813,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v10 @@ -865,8 +865,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v12, v0.t @@ -891,8 +891,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v12 @@ -943,8 +943,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v16, v0.t @@ -969,8 +969,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll @@ -1047,8 +1047,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 
12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vor.vv v8, v8, v9, v0.t @@ -1073,8 +1073,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vor.vv v8, v8, v9 @@ -1151,8 +1151,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vor.vv v8, v8, v10, v0.t @@ -1177,8 +1177,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vor.vv v8, v8, v10 @@ -1255,8 +1255,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vor.vv v8, v8, v12, v0.t @@ -1281,8 +1281,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vor.vv v8, v8, v12 @@ -1359,8 +1359,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vor.vv v8, v8, v16, v0.t @@ -1385,8 +1385,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vor.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll @@ -570,8 +570,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vmerge.vvm v8, v8, v9, v0 @@ -621,8 +621,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; 
RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vmerge.vvm v8, v8, v10, v0 @@ -672,8 +672,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu ; RV32-NEXT: vmerge.vvm v8, v8, v12, v0 @@ -723,8 +723,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, tu, mu ; RV32-NEXT: vmerge.vvm v8, v8, v16, v0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll @@ -664,8 +664,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v9, v0.t @@ -690,8 +690,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v9 @@ -742,8 +742,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v10, v0.t @@ -768,8 +768,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v10 @@ -820,8 +820,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v12, v0.t @@ -846,8 +846,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v12 @@ -898,8 +898,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, 
e64, m8, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v16, v0.t @@ -924,8 +924,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll @@ -663,8 +663,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v9, v0.t @@ -689,8 +689,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v9 @@ -741,8 +741,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v10, v0.t @@ -767,8 +767,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v10 @@ -819,8 +819,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v12, v0.t @@ -845,8 +845,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v12 @@ -897,8 +897,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v16, v0.t @@ -923,8 +923,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v16 diff --git 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll @@ -661,8 +661,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsub.vv v8, v9, v8, v0.t @@ -687,8 +687,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsub.vv v8, v9, v8 @@ -743,8 +743,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsub.vv v8, v10, v8, v0.t @@ -769,8 +769,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsub.vv v8, v10, v8 @@ -825,8 +825,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsub.vv v8, v12, v8, v0.t @@ -851,8 +851,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsub.vv v8, v12, v8 @@ -907,8 +907,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsub.vv v8, v16, v8, v0.t @@ -933,8 +933,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsub.vv v8, v16, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll @@ -455,8 +455,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, 
(a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -504,8 +504,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -553,8 +553,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -602,8 +602,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll @@ -455,8 +455,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -504,8 +504,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -553,8 +553,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -602,8 +602,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll @@ -467,8 +467,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -517,8 +517,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ 
-567,8 +567,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -617,8 +617,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll @@ -467,8 +467,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -517,8 +517,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -567,8 +567,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -617,8 +617,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll @@ -697,8 +697,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v9, v0.t @@ -723,8 +723,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v9 @@ -775,8 +775,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v10, v0.t @@ -801,8 
+801,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v10 @@ -853,8 +853,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v12, v0.t @@ -879,8 +879,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v12 @@ -931,8 +931,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v16, v0.t @@ -957,8 +957,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll @@ -1385,8 +1385,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v9, v0.t @@ -1411,8 +1411,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v9 @@ -1515,8 +1515,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v10, v0.t @@ -1541,8 +1541,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v10 @@ -1645,8 +1645,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli 
zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v12, v0.t @@ -1671,8 +1671,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v12 @@ -1775,8 +1775,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v16, v0.t @@ -1801,8 +1801,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll b/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll --- a/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll @@ -16,7 +16,8 @@ ; CHECK-NEXT: liveins: $v8 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: PseudoVSE64_V_M1 [[COPY]], %stack.0.a, 1, 6 /* e64 */ + ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI %stack.0.a, 0 + ; CHECK-NEXT: PseudoVSE64_V_M1 [[COPY]], killed [[ADDI]], 1, 6 /* e64 */ ; CHECK-NEXT: [[LD:%[0-9]+]]:gpr = LD %stack.0.a, 0 :: (dereferenceable load (s64) from %ir.a) ; CHECK-NEXT: $x10 = COPY [[LD]] ; CHECK-NEXT: PseudoRET implicit $x10 diff --git a/llvm/test/CodeGen/RISCV/rvv/localvar.ll b/llvm/test/CodeGen/RISCV/rvv/localvar.ll --- a/llvm/test/CodeGen/RISCV/rvv/localvar.ll +++ b/llvm/test/CodeGen/RISCV/rvv/localvar.ll @@ -10,10 +10,10 @@ ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 1 ; RV64IV-NEXT: sub sp, sp, a0 -; RV64IV-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: add a0, sp, a0 ; RV64IV-NEXT: addi a0, a0, 16 +; RV64IV-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; RV64IV-NEXT: vle8.v v8, (a0) ; RV64IV-NEXT: addi a0, sp, 16 ; RV64IV-NEXT: vle8.v v8, (a0) @@ -153,24 +153,24 @@ define void @local_var_m2_mix_local_scalar() { ; RV64IV-LABEL: local_var_m2_mix_local_scalar: ; RV64IV: # %bb.0: -; RV64IV-NEXT: addi sp, sp, -32 -; RV64IV-NEXT: .cfi_def_cfa_offset 32 +; RV64IV-NEXT: addi sp, sp, -16 +; RV64IV-NEXT: .cfi_def_cfa_offset 16 ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 2 ; RV64IV-NEXT: sub sp, sp, a0 -; RV64IV-NEXT: lw a0, 28(sp) +; RV64IV-NEXT: lw a0, 12(sp) ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 1 ; RV64IV-NEXT: add a0, sp, a0 -; RV64IV-NEXT: addi a0, a0, 32 +; RV64IV-NEXT: addi a0, a0, 16 ; RV64IV-NEXT: vl2r.v v8, (a0) -; RV64IV-NEXT: addi a0, sp, 32 +; RV64IV-NEXT: addi a0, sp, 16 ; RV64IV-NEXT: vl2r.v v8, (a0) -; RV64IV-NEXT: lw a0, 24(sp) +; RV64IV-NEXT: lw a0, 8(sp) ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 2 ; RV64IV-NEXT: add sp, sp, a0 -; RV64IV-NEXT: addi sp, sp, 32 +; RV64IV-NEXT: addi sp, sp, 16 ; RV64IV-NEXT: ret %local_scalar0 = alloca i32 %local0 = 
alloca @@ -190,8 +190,10 @@ ; RV64IV-NEXT: .cfi_def_cfa_offset 32 ; RV64IV-NEXT: sd ra, 24(sp) # 8-byte Folded Spill ; RV64IV-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64IV-NEXT: sd s1, 8(sp) # 8-byte Folded Spill ; RV64IV-NEXT: .cfi_offset ra, -8 ; RV64IV-NEXT: .cfi_offset s0, -16 +; RV64IV-NEXT: .cfi_offset s1, -24 ; RV64IV-NEXT: addi s0, sp, 32 ; RV64IV-NEXT: .cfi_def_cfa s0, 0 ; RV64IV-NEXT: csrr a1, vlenb @@ -205,12 +207,12 @@ ; RV64IV-NEXT: slli a1, a1, 1 ; RV64IV-NEXT: sub a1, s0, a1 ; RV64IV-NEXT: addi a1, a1, -32 +; RV64IV-NEXT: csrr a2, vlenb +; RV64IV-NEXT: slli a2, a2, 1 +; RV64IV-NEXT: sub a2, s0, a2 +; RV64IV-NEXT: addi s1, a2, -32 ; RV64IV-NEXT: call notdead@plt -; RV64IV-NEXT: csrr a0, vlenb -; RV64IV-NEXT: slli a0, a0, 1 -; RV64IV-NEXT: sub a0, s0, a0 -; RV64IV-NEXT: addi a0, a0, -32 -; RV64IV-NEXT: vl2r.v v8, (a0) +; RV64IV-NEXT: vl2r.v v8, (s1) ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 2 ; RV64IV-NEXT: sub a0, s0, a0 @@ -219,6 +221,7 @@ ; RV64IV-NEXT: addi sp, s0, -32 ; RV64IV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64IV-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64IV-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; RV64IV-NEXT: addi sp, sp, 32 ; RV64IV-NEXT: ret %1 = alloca i8, i64 %n @@ -233,15 +236,17 @@ define void @local_var_m2_with_bp(i64 %n) { ; RV64IV-LABEL: local_var_m2_with_bp: ; RV64IV: # %bb.0: -; RV64IV-NEXT: addi sp, sp, -272 -; RV64IV-NEXT: .cfi_def_cfa_offset 272 -; RV64IV-NEXT: sd ra, 264(sp) # 8-byte Folded Spill -; RV64IV-NEXT: sd s0, 256(sp) # 8-byte Folded Spill -; RV64IV-NEXT: sd s1, 248(sp) # 8-byte Folded Spill +; RV64IV-NEXT: addi sp, sp, -256 +; RV64IV-NEXT: .cfi_def_cfa_offset 256 +; RV64IV-NEXT: sd ra, 248(sp) # 8-byte Folded Spill +; RV64IV-NEXT: sd s0, 240(sp) # 8-byte Folded Spill +; RV64IV-NEXT: sd s1, 232(sp) # 8-byte Folded Spill +; RV64IV-NEXT: sd s2, 224(sp) # 8-byte Folded Spill ; RV64IV-NEXT: .cfi_offset ra, -8 ; RV64IV-NEXT: .cfi_offset s0, -16 ; RV64IV-NEXT: .cfi_offset s1, -24 -; RV64IV-NEXT: addi s0, sp, 272 +; RV64IV-NEXT: .cfi_offset s2, -32 +; RV64IV-NEXT: addi s0, sp, 256 ; RV64IV-NEXT: .cfi_def_cfa s0, 0 ; RV64IV-NEXT: csrr a1, vlenb ; RV64IV-NEXT: slli a1, a1, 2 @@ -256,22 +261,23 @@ ; RV64IV-NEXT: csrr a2, vlenb ; RV64IV-NEXT: slli a2, a2, 1 ; RV64IV-NEXT: add a2, s1, a2 -; RV64IV-NEXT: addi a2, a2, 240 +; RV64IV-NEXT: addi a2, a2, 224 +; RV64IV-NEXT: csrr a3, vlenb +; RV64IV-NEXT: slli a3, a3, 1 +; RV64IV-NEXT: add a3, s1, a3 +; RV64IV-NEXT: addi s2, a3, 224 ; RV64IV-NEXT: call notdead2@plt ; RV64IV-NEXT: lw a0, 124(s1) -; RV64IV-NEXT: csrr a0, vlenb -; RV64IV-NEXT: slli a0, a0, 1 -; RV64IV-NEXT: add a0, s1, a0 -; RV64IV-NEXT: addi a0, a0, 240 -; RV64IV-NEXT: vl2r.v v8, (a0) -; RV64IV-NEXT: addi a0, s1, 240 +; RV64IV-NEXT: vl2r.v v8, (s2) +; RV64IV-NEXT: addi a0, s1, 224 ; RV64IV-NEXT: vl2r.v v8, (a0) ; RV64IV-NEXT: lw a0, 120(s1) -; RV64IV-NEXT: addi sp, s0, -272 -; RV64IV-NEXT: ld ra, 264(sp) # 8-byte Folded Reload -; RV64IV-NEXT: ld s0, 256(sp) # 8-byte Folded Reload -; RV64IV-NEXT: ld s1, 248(sp) # 8-byte Folded Reload -; RV64IV-NEXT: addi sp, sp, 272 +; RV64IV-NEXT: addi sp, s0, -256 +; RV64IV-NEXT: ld ra, 248(sp) # 8-byte Folded Reload +; RV64IV-NEXT: ld s0, 240(sp) # 8-byte Folded Reload +; RV64IV-NEXT: ld s1, 232(sp) # 8-byte Folded Reload +; RV64IV-NEXT: ld s2, 224(sp) # 8-byte Folded Reload +; RV64IV-NEXT: addi sp, sp, 256 ; RV64IV-NEXT: ret %1 = alloca i8, i64 %n %2 = alloca i32, align 128 diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll 
b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll --- a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll @@ -1383,8 +1383,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32-NEXT: vmseq.vv v0, v8, v9, v0.t @@ -1420,8 +1420,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32-NEXT: vmsle.vv v0, v9, v8, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll --- a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll +++ b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll @@ -55,8 +55,8 @@ ; RV64IV-NEXT: add a0, sp, a0 ; RV64IV-NEXT: addi a0, a0, 64 ; RV64IV-NEXT: vl8r.v v24, (a0) -; RV64IV-NEXT: addi a0, sp, 64 ; RV64IV-NEXT: addi a1, sp, 64 +; RV64IV-NEXT: addi a0, sp, 64 ; RV64IV-NEXT: vs8r.v v24, (a1) ; RV64IV-NEXT: call callee@plt ; RV64IV-NEXT: addi sp, s0, -80 diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll --- a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll @@ -34,19 +34,14 @@ ; CHECK-NEXT: andi sp, sp, -64 ; CHECK-NEXT: mv s1, sp ; CHECK-NEXT: addi t0, s1, 64 -; CHECK-NEXT: sd t0, 8(sp) -; CHECK-NEXT: csrr t0, vlenb -; CHECK-NEXT: slli t0, t0, 3 -; CHECK-NEXT: add t0, s1, t0 -; CHECK-NEXT: addi t0, t0, 64 -; CHECK-NEXT: sd t0, 0(sp) -; CHECK-NEXT: addi t0, s1, 64 -; CHECK-NEXT: vs8r.v v8, (t0) -; CHECK-NEXT: csrr t0, vlenb -; CHECK-NEXT: slli t0, t0, 3 -; CHECK-NEXT: add t0, s1, t0 -; CHECK-NEXT: addi t0, t0, 64 ; CHECK-NEXT: vs8r.v v8, (t0) +; CHECK-NEXT: csrr t1, vlenb +; CHECK-NEXT: slli t1, t1, 3 +; CHECK-NEXT: add t1, s1, t1 +; CHECK-NEXT: addi t1, t1, 64 +; CHECK-NEXT: vs8r.v v8, (t1) +; CHECK-NEXT: sd t0, 8(sp) +; CHECK-NEXT: sd t1, 0(sp) ; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: call bar@plt ; CHECK-NEXT: addi sp, s0, -80 diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll --- a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll @@ -5,34 +5,34 @@ define dso_local void @lots_args(i32 signext %x0, i32 signext %x1, %v0, i32 signext %x2, i32 signext %x3, i32 signext %x4, i32 signext %x5, i32 signext %x6, i32 %x7, i32 %x8, i32 %x9) #0 { ; CHECK-LABEL: lots_args: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -80 -; CHECK-NEXT: sd ra, 72(sp) # 8-byte Folded Spill -; CHECK-NEXT: sd s0, 64(sp) # 8-byte Folded Spill -; CHECK-NEXT: addi s0, sp, 80 +; CHECK-NEXT: addi sp, sp, -64 +; CHECK-NEXT: sd ra, 56(sp) # 8-byte Folded Spill +; CHECK-NEXT: sd s0, 48(sp) # 8-byte Folded Spill +; CHECK-NEXT: addi s0, sp, 64 ; CHECK-NEXT: csrr t0, vlenb ; CHECK-NEXT: slli t0, t0, 3 ; CHECK-NEXT: sub sp, sp, t0 ; CHECK-NEXT: ld t0, 8(s0) ; CHECK-NEXT: ld t1, 0(s0) -; CHECK-NEXT: sw a0, -36(s0) -; CHECK-NEXT: sw a1, -40(s0) +; CHECK-NEXT: sw a0, -28(s0) +; CHECK-NEXT: sw a1, -32(s0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: sub a0, s0, a0 -; CHECK-NEXT: addi a0, a0, -80 +; 
CHECK-NEXT: addi a0, a0, -64 ; CHECK-NEXT: vs8r.v v8, (a0) -; CHECK-NEXT: sw a2, -44(s0) -; CHECK-NEXT: sw a3, -48(s0) -; CHECK-NEXT: sw a4, -52(s0) -; CHECK-NEXT: sw a5, -56(s0) -; CHECK-NEXT: sw a6, -60(s0) -; CHECK-NEXT: sw a7, -64(s0) -; CHECK-NEXT: sw t1, -68(s0) -; CHECK-NEXT: sw t0, -72(s0) -; CHECK-NEXT: addi sp, s0, -80 -; CHECK-NEXT: ld ra, 72(sp) # 8-byte Folded Reload -; CHECK-NEXT: ld s0, 64(sp) # 8-byte Folded Reload -; CHECK-NEXT: addi sp, sp, 80 +; CHECK-NEXT: sw a2, -36(s0) +; CHECK-NEXT: sw a3, -40(s0) +; CHECK-NEXT: sw a4, -44(s0) +; CHECK-NEXT: sw a5, -48(s0) +; CHECK-NEXT: sw a6, -52(s0) +; CHECK-NEXT: sw a7, -56(s0) +; CHECK-NEXT: sw t1, -60(s0) +; CHECK-NEXT: sw t0, -64(s0) +; CHECK-NEXT: addi sp, s0, -64 +; CHECK-NEXT: ld ra, 56(sp) # 8-byte Folded Reload +; CHECK-NEXT: ld s0, 48(sp) # 8-byte Folded Reload +; CHECK-NEXT: addi sp, sp, 64 ; CHECK-NEXT: ret entry: %x0.addr = alloca i32, align 4 @@ -66,6 +66,7 @@ ; CHECK-NEXT: addi sp, sp, -112 ; CHECK-NEXT: sd ra, 104(sp) # 8-byte Folded Spill ; CHECK-NEXT: sd s0, 96(sp) # 8-byte Folded Spill +; CHECK-NEXT: sd s1, 88(sp) # 8-byte Folded Spill ; CHECK-NEXT: addi s0, sp, 112 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -76,14 +77,14 @@ ; CHECK-NEXT: vsetivli a0, 4, e32, m8, ta, mu ; CHECK-NEXT: sd a0, -64(s0) ; CHECK-NEXT: ld a0, -64(s0) +; CHECK-NEXT: addi a1, s0, -56 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: addi a0, s0, -56 -; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: sub a0, s0, a0 -; CHECK-NEXT: addi a0, a0, -112 -; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: addi s1, a0, -112 +; CHECK-NEXT: vs8r.v v8, (s1) ; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: sw a0, -68(s0) ; CHECK-NEXT: sw a0, -72(s0) @@ -97,11 +98,7 @@ ; CHECK-NEXT: sw a0, -104(s0) ; CHECK-NEXT: lw a0, -68(s0) ; CHECK-NEXT: lw a1, -72(s0) -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: sub a2, s0, a2 -; CHECK-NEXT: addi a2, a2, -112 -; CHECK-NEXT: vl8re32.v v8, (a2) +; CHECK-NEXT: vl8re32.v v8, (s1) ; CHECK-NEXT: lw a2, -76(s0) ; CHECK-NEXT: lw a3, -80(s0) ; CHECK-NEXT: lw a4, -84(s0) @@ -117,11 +114,7 @@ ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: lw a0, -68(s0) ; CHECK-NEXT: lw a1, -72(s0) -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: sub a2, s0, a2 -; CHECK-NEXT: addi a2, a2, -112 -; CHECK-NEXT: vl8re32.v v8, (a2) +; CHECK-NEXT: vl8re32.v v8, (s1) ; CHECK-NEXT: lw a2, -76(s0) ; CHECK-NEXT: lw a3, -80(s0) ; CHECK-NEXT: lw a4, -84(s0) @@ -139,6 +132,7 @@ ; CHECK-NEXT: addi sp, s0, -112 ; CHECK-NEXT: ld ra, 104(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 96(sp) # 8-byte Folded Reload +; CHECK-NEXT: ld s1, 88(sp) # 8-byte Folded Reload ; CHECK-NEXT: addi sp, sp, 112 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll @@ -2532,8 +2532,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmseq.vv v0, v8, v9, v0.t @@ -2558,8 +2558,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, 
e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmseq.vv v0, v9, v8, v0.t @@ -2618,8 +2618,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsne.vv v0, v8, v9, v0.t @@ -2644,8 +2644,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsne.vv v0, v9, v8, v0.t @@ -2704,8 +2704,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsltu.vv v0, v9, v8, v0.t @@ -2730,8 +2730,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsltu.vv v0, v8, v9, v0.t @@ -2790,8 +2790,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsleu.vv v0, v9, v8, v0.t @@ -2818,8 +2818,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsleu.vv v0, v8, v9, v0.t @@ -2878,8 +2878,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsltu.vv v0, v8, v9, v0.t @@ -2904,8 +2904,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsltu.vv v0, v9, v8, v0.t @@ -2964,8 +2964,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmslt.vv v0, v9, v8, v0.t @@ -2990,8 +2990,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) 
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmslt.vv v0, v8, v9, v0.t @@ -3050,8 +3050,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsle.vv v0, v9, v8, v0.t @@ -3078,8 +3078,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsle.vv v0, v8, v9, v0.t @@ -3138,8 +3138,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmslt.vv v0, v8, v9, v0.t @@ -3164,8 +3164,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmslt.vv v0, v9, v8, v0.t @@ -3224,8 +3224,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsle.vv v0, v8, v9, v0.t @@ -3250,8 +3250,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsle.vv v0, v9, v8, v0.t @@ -3315,8 +3315,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmseq.vv v16, v8, v24, v0.t @@ -3343,8 +3343,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmseq.vv v16, v24, v8, v0.t @@ -3408,8 +3408,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsne.vv v16, v8, v24, v0.t @@ -3436,8 +3436,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw 
a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsne.vv v16, v24, v8, v0.t @@ -3501,8 +3501,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsltu.vv v16, v24, v8, v0.t @@ -3529,8 +3529,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsltu.vv v16, v8, v24, v0.t @@ -3594,8 +3594,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsleu.vv v16, v24, v8, v0.t @@ -3624,8 +3624,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsleu.vv v16, v8, v24, v0.t @@ -3689,8 +3689,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsltu.vv v16, v8, v24, v0.t @@ -3717,8 +3717,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsltu.vv v16, v24, v8, v0.t @@ -3782,8 +3782,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmslt.vv v16, v24, v8, v0.t @@ -3810,8 +3810,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmslt.vv v16, v8, v24, v0.t @@ -3875,8 +3875,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsle.vv v16, v24, v8, v0.t @@ -3905,8 
+3905,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsle.vv v16, v8, v24, v0.t @@ -3970,8 +3970,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmslt.vv v16, v8, v24, v0.t @@ -3998,8 +3998,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmslt.vv v16, v24, v8, v0.t @@ -4063,8 +4063,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsle.vv v16, v8, v24, v0.t @@ -4091,8 +4091,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsle.vv v16, v24, v8, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll --- a/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll @@ -2243,8 +2243,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmseq.vv v0, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2268,8 +2268,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmseq.vv v0, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2339,8 +2339,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsne.vv v0, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2364,8 +2364,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsne.vv v0, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2411,8 +2411,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli 
a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsltu.vv v0, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2436,8 +2436,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsltu.vv v0, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2483,8 +2483,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsleu.vv v0, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2509,8 +2509,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsleu.vv v0, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2629,8 +2629,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsltu.vv v0, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2654,8 +2654,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsltu.vv v0, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2762,8 +2762,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsleu.vv v0, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2787,8 +2787,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsleu.vv v0, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2835,8 +2835,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmslt.vv v0, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2860,8 +2860,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmslt.vv v0, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2907,8 +2907,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsle.vv v0, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2933,8 +2933,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; 
RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsle.vv v0, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -3029,8 +3029,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmslt.vv v0, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -3054,8 +3054,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmslt.vv v0, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -3150,8 +3150,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsle.vv v0, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -3175,8 +3175,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsle.vv v0, v16, v8 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll @@ -501,8 +501,8 @@ ; RV32-NEXT: lui a0, 797989 ; RV32-NEXT: addi a0, a0, -683 ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v8, (a0), zero ; RV32-NEXT: vid.v v16 ; RV32-NEXT: vmul.vv v8, v16, v8 @@ -552,8 +552,8 @@ ; RV32-NEXT: sw zero, 12(sp) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vid.v v8 ; RV32-NEXT: vadd.vv v16, v8, v16 @@ -667,10 +667,9 @@ ; RV32-NEXT: mulhu a0, a0, a2 ; RV32-NEXT: add a0, a0, a1 ; RV32-NEXT: sw a0, 12(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v8, (a0), zero -; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vid.v v24 ; RV32-NEXT: vmul.vv v8, v24, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll --- a/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll @@ -293,8 +293,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmacc.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -328,8 +328,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmadd.vv v8, v10, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -363,8 +363,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vnmsac.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -398,8 +398,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vnmsub.vv v8, v10, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -919,8 +919,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v8, (a0), zero ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll --- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll @@ -1151,8 +1151,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; RV32-NEXT: vsub.vv v8, v10, v9 @@ -1186,8 +1186,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; RV32-NEXT: vsadd.vv v8, v9, v10 @@ -1287,8 +1287,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; RV32-NEXT: vsmul.vv v8, v9, v10 @@ -1453,8 +1453,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; RV32-NEXT: vssub.vv v8, v9, v10 @@ -1488,8 +1488,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; RV32-NEXT: vssubu.vv v8, v9, v10 @@ -2477,8 +2477,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV32-NEXT: vmerge.vvm v8, v16, v24, v0 @@ -2509,9 +2509,9 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: li a1, -1 ; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: addi a1, sp, 8 ; 
RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vlse64.v v24, (a1), zero ; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV32-NEXT: vmerge.vvm v8, v16, v24, v0 ; RV32-NEXT: addi sp, sp, 16 @@ -2658,8 +2658,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vlse64.v v8, (a0), zero ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaadd.ll @@ -1859,8 +1859,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vaadd.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1894,8 +1894,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vaadd.vv v8, v9, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -1929,8 +1929,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vaadd.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1964,8 +1964,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vaadd.vv v8, v10, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -1999,8 +1999,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vaadd.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2034,8 +2034,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vaadd.vv v8, v12, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -2069,8 +2069,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vaadd.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vaadd.vv v8, v16, v24, v0.t ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll @@ -1859,8 +1859,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vaaddu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1894,8 +1894,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vaaddu.vv v8, v9, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -1929,8 +1929,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vaaddu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1964,8 +1964,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vaaddu.vv v8, v10, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -1999,8 +1999,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vaaddu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2034,8 +2034,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vaaddu.vv v8, v12, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -2069,8 +2069,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vaaddu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vaaddu.vv v8, v16, v24, v0.t ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll @@ -974,8 +974,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: addi sp, sp, 16 @@ -1004,8 +1004,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, 
(a0), zero ; CHECK-NEXT: vadc.vvm v8, v8, v10, v0 ; CHECK-NEXT: addi sp, sp, 16 @@ -1034,8 +1034,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vadc.vvm v8, v8, v12, v0 ; CHECK-NEXT: addi sp, sp, 16 @@ -1064,8 +1064,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vadc.vvm v8, v8, v16, v0 ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll @@ -672,8 +672,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -721,8 +721,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -770,8 +770,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -819,8 +819,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll @@ -1710,8 +1710,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v9, v0.t @@ -1736,8 +1736,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v9 @@ -1814,8 +1814,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v10, v0.t @@ 
-1840,8 +1840,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v10 @@ -1918,8 +1918,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v12, v0.t @@ -1944,8 +1944,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v12 @@ -2022,8 +2022,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v16, v0.t @@ -2048,8 +2048,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd.ll b/llvm/test/CodeGen/RISCV/rvv/vadd.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd.ll @@ -1859,8 +1859,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1937,8 +1937,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vadd.vv v8, v9, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -1972,8 +1972,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -2007,8 +2007,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vadd.vv v8, v10, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -2042,8 +2042,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: 
vadd.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2077,8 +2077,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vadd.vv v8, v12, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -2112,8 +2112,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2147,8 +2147,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vadd.vv v8, v16, v24, v0.t ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll @@ -1897,8 +1897,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1927,8 +1927,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -1956,8 +1956,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1986,8 +1986,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vand.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2015,8 +2015,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2045,8 +2045,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vand.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2074,8 +2074,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, 
(a0), zero ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll @@ -1081,8 +1081,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1153,8 +1153,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1225,8 +1225,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -1297,8 +1297,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll @@ -1505,8 +1505,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vand.vv v8, v8, v9, v0.t @@ -1531,8 +1531,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vand.vv v8, v8, v9 @@ -1609,8 +1609,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vand.vv v8, v8, v10, v0.t @@ -1635,8 +1635,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vand.vv v8, 
v8, v10 @@ -1713,8 +1713,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vand.vv v8, v8, v12, v0.t @@ -1739,8 +1739,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vand.vv v8, v8, v12 @@ -1817,8 +1817,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vand.vv v8, v8, v16, v0.t @@ -1843,8 +1843,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vand.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub.ll b/llvm/test/CodeGen/RISCV/rvv/vasub.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasub.ll @@ -1859,8 +1859,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vasub.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1894,8 +1894,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vasub.vv v8, v9, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -1929,8 +1929,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vasub.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1964,8 +1964,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vasub.vv v8, v10, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -1999,8 +1999,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vasub.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2034,8 +2034,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: 
vasub.vv v8, v12, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -2069,8 +2069,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vasub.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vasub.vv v8, v16, v24, v0.t ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasubu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasubu.ll @@ -1859,8 +1859,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vasubu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1894,8 +1894,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vasubu.vv v8, v9, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -1929,8 +1929,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vasubu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1964,8 +1964,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vasubu.vv v8, v10, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -1999,8 +1999,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vasubu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2034,8 +2034,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vasubu.vv v8, v12, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -2069,8 +2069,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vasubu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vasubu.vv v8, v16, v24, v0.t ; 
RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll @@ -1897,8 +1897,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1927,8 +1927,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -1956,8 +1956,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1986,8 +1986,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2015,8 +2015,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vdiv.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2045,8 +2045,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2074,8 +2074,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vdiv.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll @@ -878,8 +878,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vdiv.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -907,8 +907,8 @@ ; RV32-V-NEXT: lui a0, 898779 ; RV32-V-NEXT: addi a0, a0, 
1755 ; RV32-V-NEXT: sw a0, 8(sp) -; RV32-V-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-V-NEXT: addi a0, sp, 8 +; RV32-V-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-V-NEXT: vlse64.v v9, (a0), zero ; RV32-V-NEXT: vmulh.vv v8, v8, v9 ; RV32-V-NEXT: li a0, 63 @@ -959,8 +959,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vdiv.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -988,8 +988,8 @@ ; RV32-V-NEXT: lui a0, 898779 ; RV32-V-NEXT: addi a0, a0, 1755 ; RV32-V-NEXT: sw a0, 8(sp) -; RV32-V-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-V-NEXT: addi a0, sp, 8 +; RV32-V-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-V-NEXT: vlse64.v v10, (a0), zero ; RV32-V-NEXT: vmulh.vv v8, v8, v10 ; RV32-V-NEXT: li a0, 63 @@ -1040,8 +1040,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vdiv.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -1069,8 +1069,8 @@ ; RV32-V-NEXT: lui a0, 898779 ; RV32-V-NEXT: addi a0, a0, 1755 ; RV32-V-NEXT: sw a0, 8(sp) -; RV32-V-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-V-NEXT: addi a0, sp, 8 +; RV32-V-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-V-NEXT: vlse64.v v12, (a0), zero ; RV32-V-NEXT: vmulh.vv v8, v8, v12 ; RV32-V-NEXT: li a0, 63 @@ -1121,8 +1121,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vdiv.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -1150,8 +1150,8 @@ ; RV32-V-NEXT: lui a0, 898779 ; RV32-V-NEXT: addi a0, a0, 1755 ; RV32-V-NEXT: sw a0, 8(sp) -; RV32-V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-V-NEXT: addi a0, sp, 8 +; RV32-V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-V-NEXT: vlse64.v v16, (a0), zero ; RV32-V-NEXT: vmulh.vv v8, v8, v16 ; RV32-V-NEXT: li a0, 63 diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll @@ -967,8 +967,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v9, v0.t @@ -993,8 +993,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v9 @@ -1045,8 +1045,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v10, v0.t @@ -1071,8 
+1071,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v10 @@ -1123,8 +1123,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v12, v0.t @@ -1149,8 +1149,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v12 @@ -1201,8 +1201,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v16, v0.t @@ -1227,8 +1227,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll @@ -1897,8 +1897,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1927,8 +1927,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -1956,8 +1956,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1986,8 +1986,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2015,8 +2015,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, 
m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vdivu.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2045,8 +2045,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2074,8 +2074,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vdivu.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll @@ -803,8 +803,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vdivu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -830,8 +830,8 @@ ; RV32-V-NEXT: sw a0, 12(sp) ; RV32-V-NEXT: li a0, 1 ; RV32-V-NEXT: sw a0, 8(sp) -; RV32-V-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-V-NEXT: addi a0, sp, 8 +; RV32-V-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-V-NEXT: vlse64.v v9, (a0), zero ; RV32-V-NEXT: vmulhu.vv v8, v8, v9 ; RV32-V-NEXT: li a0, 61 @@ -906,8 +906,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vdivu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -933,8 +933,8 @@ ; RV32-V-NEXT: sw a0, 12(sp) ; RV32-V-NEXT: li a0, 1 ; RV32-V-NEXT: sw a0, 8(sp) -; RV32-V-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-V-NEXT: addi a0, sp, 8 +; RV32-V-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-V-NEXT: vlse64.v v10, (a0), zero ; RV32-V-NEXT: vmulhu.vv v8, v8, v10 ; RV32-V-NEXT: li a0, 61 @@ -1009,8 +1009,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vdivu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -1036,8 +1036,8 @@ ; RV32-V-NEXT: sw a0, 12(sp) ; RV32-V-NEXT: li a0, 1 ; RV32-V-NEXT: sw a0, 8(sp) -; RV32-V-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-V-NEXT: addi a0, sp, 8 +; RV32-V-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-V-NEXT: vlse64.v v12, (a0), zero ; RV32-V-NEXT: vmulhu.vv v8, v8, v12 ; RV32-V-NEXT: li a0, 61 @@ -1112,8 +1112,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: 
vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vdivu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -1139,8 +1139,8 @@ ; RV32-V-NEXT: sw a0, 12(sp) ; RV32-V-NEXT: li a0, 1 ; RV32-V-NEXT: sw a0, 8(sp) -; RV32-V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-V-NEXT: addi a0, sp, 8 +; RV32-V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-V-NEXT: vlse64.v v16, (a0), zero ; RV32-V-NEXT: vmulhu.vv v8, v8, v16 ; RV32-V-NEXT: li a0, 61 diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll @@ -966,8 +966,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v9, v0.t @@ -992,8 +992,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v9 @@ -1044,8 +1044,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v10, v0.t @@ -1070,8 +1070,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v10 @@ -1122,8 +1122,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v12, v0.t @@ -1148,8 +1148,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v12 @@ -1200,8 +1200,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v16, v0.t @@ -1226,8 +1226,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v16 diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll @@ -1599,8 +1599,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vmacc.vv v8, v10, v9 @@ -1629,8 +1629,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vmacc.vv v8, v10, v9, v0.t @@ -1661,8 +1661,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vmacc.vv v8, v12, v10 @@ -1691,8 +1691,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vmacc.vv v8, v12, v10, v0.t @@ -1723,8 +1723,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vmacc.vv v8, v16, v12 @@ -1753,8 +1753,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vmacc.vv v8, v16, v12, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll @@ -812,8 +812,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -838,8 +838,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmadc.vv v0, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -864,8 +864,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmadc.vv v0, v8, v12 ; CHECK-NEXT: addi 
sp, sp, 16 @@ -890,8 +890,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmadc.vv v0, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll @@ -933,8 +933,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmadc.vvm v9, v8, v10, v0 ; CHECK-NEXT: vmv.v.v v0, v9 @@ -962,8 +962,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmadc.vvm v10, v8, v12, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -991,8 +991,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmadc.vvm v12, v8, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v12 @@ -1020,8 +1020,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero ; CHECK-NEXT: vmadc.vvm v16, v8, v24, v0 ; CHECK-NEXT: vmv1r.v v0, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll @@ -1565,8 +1565,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vmadd.vv v8, v10, v9 @@ -1595,8 +1595,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vmadd.vv v8, v10, v9, v0.t @@ -1626,8 +1626,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vmadd.vv v8, v12, v10 @@ -1656,8 +1656,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu 
; CHECK-NEXT: vmadd.vv v8, v12, v10, v0.t @@ -1687,8 +1687,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vmadd.vv v8, v16, v12 @@ -1717,8 +1717,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vmadd.vv v8, v16, v12, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll @@ -460,8 +460,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmadd.vv v8, v10, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -497,8 +497,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmacc.vv v8, v10, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -534,8 +534,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmadd.vv v8, v16, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -572,8 +572,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vmacc.vv v8, v16, v24 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll @@ -1897,8 +1897,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1927,8 +1927,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -1956,8 +1956,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmax.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1986,8 +1986,8 @@ ; CHECK-NEXT: 
addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2015,8 +2015,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmax.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2045,8 +2045,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2074,8 +2074,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll @@ -704,8 +704,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmax.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -755,8 +755,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmax.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -806,8 +806,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmax.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -857,8 +857,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmax.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll @@ -1897,8 +1897,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, 
mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1927,8 +1927,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -1956,8 +1956,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmaxu.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1986,8 +1986,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2015,8 +2015,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmaxu.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2045,8 +2045,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2074,8 +2074,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmaxu.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll @@ -704,8 +704,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmaxu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -755,8 +755,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmaxu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -806,8 +806,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 
8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmaxu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -857,8 +857,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmaxu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll @@ -1897,8 +1897,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1927,8 +1927,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -1956,8 +1956,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmin.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1986,8 +1986,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2015,8 +2015,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmin.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2045,8 +2045,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2074,8 +2074,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmin.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll 
b/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll @@ -704,8 +704,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmin.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -755,8 +755,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmin.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -806,8 +806,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmin.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -857,8 +857,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmin.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll @@ -1897,8 +1897,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1927,8 +1927,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -1956,8 +1956,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vminu.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1986,8 +1986,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2015,8 +2015,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vminu.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2045,8 +2045,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, 
sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2074,8 +2074,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vminu.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll @@ -704,8 +704,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vminu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -755,8 +755,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vminu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -806,8 +806,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vminu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -857,8 +857,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vminu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll @@ -812,8 +812,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -838,8 +838,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmsbc.vv v0, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -864,8 +864,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmsbc.vv v0, 
v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -890,8 +890,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmsbc.vv v0, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll @@ -933,8 +933,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmsbc.vvm v9, v8, v10, v0 ; CHECK-NEXT: vmv.v.v v0, v9 @@ -962,8 +962,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmsbc.vvm v10, v8, v12, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -991,8 +991,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmsbc.vvm v12, v8, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v12 @@ -1020,8 +1020,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero ; CHECK-NEXT: vmsbc.vvm v16, v8, v24, v0 ; CHECK-NEXT: vmv1r.v v0, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll @@ -1653,8 +1653,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1681,8 +1681,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v11, (a0), zero ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 @@ -1712,8 +1712,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmseq.vv v0, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1740,8 +1740,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -1771,8 +1771,8 @@ ; 
CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmseq.vv v0, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -1799,8 +1799,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll @@ -1668,8 +1668,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1696,8 +1696,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v11, (a0), zero ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 @@ -1727,8 +1727,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmsle.vv v0, v10, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1755,8 +1755,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -1786,8 +1786,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmsle.vv v0, v12, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1814,8 +1814,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 @@ -2741,8 +2741,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2764,8 +2764,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmsle.vv v10, v12, v8, v0.t @@ 
-2789,8 +2789,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmv1r.v v12, v0 ; CHECK-NEXT: vmsle.vv v12, v16, v8, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll @@ -1668,8 +1668,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1696,8 +1696,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v11, (a0), zero ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 @@ -1727,8 +1727,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmsleu.vv v0, v10, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1755,8 +1755,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -1786,8 +1786,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmsleu.vv v0, v12, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1814,8 +1814,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 @@ -2753,8 +2753,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmsleu.vv v0, v9, v8, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2776,8 +2776,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmsleu.vv v10, v12, v8, v0.t @@ -2801,8 +2801,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmv1r.v v12, 
v0 ; CHECK-NEXT: vmsleu.vv v12, v16, v8, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll @@ -1653,8 +1653,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1681,8 +1681,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v11, (a0), zero ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 @@ -1712,8 +1712,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmslt.vv v0, v10, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1740,8 +1740,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -1771,8 +1771,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmslt.vv v0, v12, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1799,8 +1799,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll @@ -1653,8 +1653,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1681,8 +1681,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v11, (a0), zero ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 @@ -1712,8 +1712,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmsltu.vv v0, v10, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1740,8 +1740,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; 
CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -1771,8 +1771,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmsltu.vv v0, v12, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1799,8 +1799,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll @@ -1653,8 +1653,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1681,8 +1681,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v11, (a0), zero ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 @@ -1712,8 +1712,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmsle.vv v0, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1740,8 +1740,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -1771,8 +1771,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmsle.vv v0, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -1799,8 +1799,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll @@ -1653,8 +1653,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, 
e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1681,8 +1681,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v11, (a0), zero ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 @@ -1712,8 +1712,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmsleu.vv v0, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1740,8 +1740,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -1771,8 +1771,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmsleu.vv v0, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -1799,8 +1799,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll @@ -1653,8 +1653,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1681,8 +1681,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v11, (a0), zero ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 @@ -1712,8 +1712,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmslt.vv v0, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1740,8 +1740,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -1771,8 +1771,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, 
a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmslt.vv v0, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -1799,8 +1799,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll @@ -1653,8 +1653,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1681,8 +1681,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v11, (a0), zero ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 @@ -1712,8 +1712,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmsltu.vv v0, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1740,8 +1740,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -1771,8 +1771,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmsltu.vv v0, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -1799,8 +1799,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll @@ -1653,8 +1653,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1681,8 +1681,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v11, (a0), zero ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 @@ -1712,8 
+1712,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmsne.vv v0, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1740,8 +1740,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmv1r.v v11, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -1771,8 +1771,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmsne.vv v0, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -1799,8 +1799,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmv1r.v v13, v0 ; CHECK-NEXT: vmv1r.v v0, v12 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll @@ -649,8 +649,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -721,8 +721,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -793,8 +793,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -865,8 +865,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll @@ -1011,8 +1011,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v9, v0.t @@ -1037,8 +1037,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, 
m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v9 @@ -1089,8 +1089,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v10, v0.t @@ -1115,8 +1115,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v10 @@ -1167,8 +1167,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v12, v0.t @@ -1193,8 +1193,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v12 @@ -1245,8 +1245,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v16, v0.t @@ -1271,8 +1271,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul.ll b/llvm/test/CodeGen/RISCV/rvv/vmul.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul.ll @@ -1859,8 +1859,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1894,8 +1894,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmul.vv v8, v9, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -1929,8 +1929,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1964,8 +1964,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; 
RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmul.vv v8, v10, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -1999,8 +1999,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2034,8 +2034,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmul.vv v8, v12, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -2069,8 +2069,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vmul.vv v8, v16, v24, v0.t ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulh.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulh.ll @@ -1859,8 +1859,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmulh.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1894,8 +1894,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmulh.vv v8, v9, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -1929,8 +1929,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmulh.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1964,8 +1964,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmulh.vv v8, v10, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -1999,8 +1999,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmulh.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2034,8 +2034,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) 
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmulh.vv v8, v12, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -2069,8 +2069,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmulh.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vmulh.vv v8, v16, v24, v0.t ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll @@ -1859,8 +1859,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmulhsu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1894,8 +1894,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -1929,8 +1929,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmulhsu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1964,8 +1964,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmulhsu.vv v8, v10, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -1999,8 +1999,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmulhsu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2034,8 +2034,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmulhsu.vv v8, v12, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -2069,8 +2069,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmulhsu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli 
zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vmulhsu.vv v8, v16, v24, v0.t ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu.ll @@ -1859,8 +1859,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmulhu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1894,8 +1894,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmulhu.vv v8, v9, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -1929,8 +1929,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmulhu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1964,8 +1964,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmulhu.vv v8, v10, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -1999,8 +1999,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmulhu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2034,8 +2034,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmulhu.vv v8, v12, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -2069,8 +2069,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmulhu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vmulhu.vv v8, v16, v24, v0.t ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll @@ -372,8 +372,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), zero ; CHECK-NEXT: 
addi sp, sp, 16 ; CHECK-NEXT: ret @@ -397,8 +397,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), zero ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -422,8 +422,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), zero ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -447,8 +447,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), zero ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll @@ -1565,8 +1565,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vnmsac.vv v8, v10, v9 @@ -1595,8 +1595,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vnmsac.vv v8, v10, v9, v0.t @@ -1626,8 +1626,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vnmsac.vv v8, v12, v10 @@ -1656,8 +1656,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vnmsac.vv v8, v12, v10, v0.t @@ -1687,8 +1687,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vnmsac.vv v8, v16, v12 @@ -1717,8 +1717,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vnmsac.vv v8, v16, v12, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll @@ -1565,8 +1565,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vnmsub.vv v8, v10, v9 @@ -1595,8 +1595,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vnmsub.vv v8, v10, v9, v0.t @@ -1626,8 +1626,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vnmsub.vv v8, v12, v10 @@ -1656,8 +1656,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vnmsub.vv v8, v12, v10, v0.t @@ -1687,8 +1687,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vnmsub.vv v8, v16, v12 @@ -1717,8 +1717,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vnmsub.vv v8, v16, v12, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll @@ -460,8 +460,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vnmsub.vv v8, v10, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -497,8 +497,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vnmsac.vv v8, v10, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -534,8 +534,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vnmsub.vv v8, v16, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -572,8 +572,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; 
RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vnmsac.vv v8, v16, v24 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll @@ -1897,8 +1897,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1927,8 +1927,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -1956,8 +1956,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vor.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1986,8 +1986,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vor.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2015,8 +2015,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2045,8 +2045,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vor.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2074,8 +2074,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll @@ -891,8 +891,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, 
ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vor.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -953,8 +953,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1015,8 +1015,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vor.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -1077,8 +1077,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll @@ -1519,8 +1519,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vor.vv v8, v8, v9, v0.t @@ -1545,8 +1545,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vor.vv v8, v8, v9 @@ -1623,8 +1623,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vor.vv v8, v8, v10, v0.t @@ -1649,8 +1649,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vor.vv v8, v8, v10 @@ -1727,8 +1727,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vor.vv v8, v8, v12, v0.t @@ -1753,8 +1753,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vor.vv v8, v8, v12 @@ -1831,8 +1831,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, 
e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vor.vv v8, v8, v16, v0.t @@ -1857,8 +1857,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vor.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll @@ -963,8 +963,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vmerge.vvm v8, v8, v9, v0 @@ -1014,8 +1014,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vmerge.vvm v8, v8, v10, v0 @@ -1065,8 +1065,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu ; RV32-NEXT: vmerge.vvm v8, v8, v12, v0 @@ -1116,8 +1116,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, tu, mu ; RV32-NEXT: vmerge.vvm v8, v8, v16, v0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll @@ -1371,8 +1371,8 @@ ; CHECK-NEXT: lui a0, 524288 ; CHECK-NEXT: sw a0, 12(sp) ; CHECK-NEXT: sw zero, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 @@ -1418,8 +1418,8 @@ ; CHECK-NEXT: lui a0, 524288 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: sw a0, 12(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 @@ -1577,8 +1577,8 @@ ; CHECK-NEXT: lui a0, 524288 ; CHECK-NEXT: sw a0, 12(sp) ; CHECK-NEXT: sw zero, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v10 @@ -1624,8 +1624,8 @@ ; 
CHECK-NEXT: lui a0, 524288 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: sw a0, 12(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v10 @@ -1783,8 +1783,8 @@ ; CHECK-NEXT: lui a0, 524288 ; CHECK-NEXT: sw a0, 12(sp) ; CHECK-NEXT: sw zero, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v12 @@ -1830,8 +1830,8 @@ ; CHECK-NEXT: lui a0, 524288 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: sw a0, 12(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v12 diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll @@ -1328,8 +1328,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredsum.vs v9, v8, v9, v0.t @@ -1360,8 +1360,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e32, mf2, tu, mu ; RV32-NEXT: vwredsum.vs v9, v8, v9, v0.t @@ -1395,8 +1395,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e32, mf2, tu, mu ; RV32-NEXT: vwredsum.vs v9, v8, v9, v0.t @@ -1432,8 +1432,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t @@ -1466,8 +1466,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredmax.vs v9, v8, v9, v0.t @@ -1500,8 +1500,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t @@ -1534,8 +1534,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 
8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredmin.vs v9, v8, v9, v0.t @@ -1568,8 +1568,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredand.vs v9, v8, v9, v0.t @@ -1602,8 +1602,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredor.vs v9, v8, v9, v0.t @@ -1636,8 +1636,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu ; RV32-NEXT: vredxor.vs v9, v8, v9, v0.t @@ -1670,8 +1670,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredsum.vs v10, v8, v10, v0.t @@ -1702,8 +1702,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, mu ; RV32-NEXT: vwredsum.vs v9, v8, v9, v0.t @@ -1737,8 +1737,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, mu ; RV32-NEXT: vwredsum.vs v9, v8, v9, v0.t @@ -1774,8 +1774,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredmaxu.vs v10, v8, v10, v0.t @@ -1808,8 +1808,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredmax.vs v10, v8, v10, v0.t @@ -1842,8 +1842,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredminu.vs v10, v8, v10, v0.t @@ -1876,8 +1876,8 @@ ; RV32-NEXT: 
.cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredmin.vs v10, v8, v10, v0.t @@ -1910,8 +1910,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredand.vs v10, v8, v10, v0.t @@ -1944,8 +1944,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredor.vs v10, v8, v10, v0.t @@ -1978,8 +1978,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu ; RV32-NEXT: vredxor.vs v10, v8, v10, v0.t @@ -2012,8 +2012,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu ; RV32-NEXT: vredsum.vs v12, v8, v12, v0.t @@ -2044,8 +2044,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e32, m2, tu, mu ; RV32-NEXT: vwredsum.vs v10, v8, v10, v0.t @@ -2079,8 +2079,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e32, m2, tu, mu ; RV32-NEXT: vwredsumu.vs v10, v8, v10, v0.t @@ -2116,8 +2116,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu ; RV32-NEXT: vredmaxu.vs v12, v8, v12, v0.t @@ -2150,8 +2150,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu ; RV32-NEXT: vredmax.vs v12, v8, v12, v0.t @@ -2184,8 +2184,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu 
; RV32-NEXT: vredminu.vs v12, v8, v12, v0.t @@ -2218,8 +2218,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu ; RV32-NEXT: vredmin.vs v12, v8, v12, v0.t @@ -2252,8 +2252,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu ; RV32-NEXT: vredand.vs v12, v8, v12, v0.t @@ -2286,8 +2286,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu ; RV32-NEXT: vredor.vs v12, v8, v12, v0.t @@ -2320,8 +2320,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu ; RV32-NEXT: vredxor.vs v12, v8, v12, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll @@ -1897,8 +1897,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1927,8 +1927,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -1956,8 +1956,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1986,8 +1986,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2015,8 +2015,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vrem.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2045,8 +2045,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; 
CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2074,8 +2074,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vrem.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll @@ -912,8 +912,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vrem.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -941,8 +941,8 @@ ; RV32-V-NEXT: lui a0, 898779 ; RV32-V-NEXT: addi a0, a0, 1755 ; RV32-V-NEXT: sw a0, 8(sp) -; RV32-V-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-V-NEXT: addi a0, sp, 8 +; RV32-V-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-V-NEXT: vlse64.v v9, (a0), zero ; RV32-V-NEXT: vmulh.vv v9, v8, v9 ; RV32-V-NEXT: li a0, 63 @@ -997,8 +997,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vrem.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1026,8 +1026,8 @@ ; RV32-V-NEXT: lui a0, 898779 ; RV32-V-NEXT: addi a0, a0, 1755 ; RV32-V-NEXT: sw a0, 8(sp) -; RV32-V-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-V-NEXT: addi a0, sp, 8 +; RV32-V-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-V-NEXT: vlse64.v v10, (a0), zero ; RV32-V-NEXT: vmulh.vv v10, v8, v10 ; RV32-V-NEXT: li a0, 63 @@ -1082,8 +1082,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vrem.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -1111,8 +1111,8 @@ ; RV32-V-NEXT: lui a0, 898779 ; RV32-V-NEXT: addi a0, a0, 1755 ; RV32-V-NEXT: sw a0, 8(sp) -; RV32-V-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-V-NEXT: addi a0, sp, 8 +; RV32-V-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-V-NEXT: vlse64.v v12, (a0), zero ; RV32-V-NEXT: vmulh.vv v12, v8, v12 ; RV32-V-NEXT: li a0, 63 @@ -1167,8 +1167,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vrem.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -1196,8 +1196,8 @@ ; RV32-V-NEXT: lui a0, 898779 ; RV32-V-NEXT: addi a0, a0, 1755 ; RV32-V-NEXT: sw a0, 8(sp) -; 
RV32-V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-V-NEXT: addi a0, sp, 8 +; RV32-V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-V-NEXT: vlse64.v v16, (a0), zero ; RV32-V-NEXT: vmulh.vv v16, v8, v16 ; RV32-V-NEXT: li a0, 63 diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll @@ -967,8 +967,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v9, v0.t @@ -993,8 +993,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v9 @@ -1045,8 +1045,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v10, v0.t @@ -1071,8 +1071,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v10 @@ -1123,8 +1123,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v12, v0.t @@ -1149,8 +1149,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v12 @@ -1201,8 +1201,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v16, v0.t @@ -1227,8 +1227,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll @@ -1897,8 +1897,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) 
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1927,8 +1927,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -1956,8 +1956,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1986,8 +1986,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2015,8 +2015,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vremu.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2045,8 +2045,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2074,8 +2074,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vremu.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll @@ -837,8 +837,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vremu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -864,8 +864,8 @@ ; RV32-V-NEXT: sw a0, 12(sp) ; RV32-V-NEXT: li a0, 1 ; RV32-V-NEXT: sw a0, 8(sp) -; RV32-V-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-V-NEXT: addi a0, sp, 8 +; RV32-V-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-V-NEXT: vlse64.v v9, (a0), zero ; RV32-V-NEXT: vmulhu.vv v9, v8, v9 ; RV32-V-NEXT: li a0, 61 @@ -948,8 +948,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; 
RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vremu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -975,8 +975,8 @@ ; RV32-V-NEXT: sw a0, 12(sp) ; RV32-V-NEXT: li a0, 1 ; RV32-V-NEXT: sw a0, 8(sp) -; RV32-V-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-V-NEXT: addi a0, sp, 8 +; RV32-V-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-V-NEXT: vlse64.v v10, (a0), zero ; RV32-V-NEXT: vmulhu.vv v10, v8, v10 ; RV32-V-NEXT: li a0, 61 @@ -1059,8 +1059,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vremu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -1086,8 +1086,8 @@ ; RV32-V-NEXT: sw a0, 12(sp) ; RV32-V-NEXT: li a0, 1 ; RV32-V-NEXT: sw a0, 8(sp) -; RV32-V-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-V-NEXT: addi a0, sp, 8 +; RV32-V-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-V-NEXT: vlse64.v v12, (a0), zero ; RV32-V-NEXT: vmulhu.vv v12, v8, v12 ; RV32-V-NEXT: li a0, 61 @@ -1170,8 +1170,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vremu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -1197,8 +1197,8 @@ ; RV32-V-NEXT: sw a0, 12(sp) ; RV32-V-NEXT: li a0, 1 ; RV32-V-NEXT: sw a0, 8(sp) -; RV32-V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-V-NEXT: addi a0, sp, 8 +; RV32-V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-V-NEXT: vlse64.v v16, (a0), zero ; RV32-V-NEXT: vmulhu.vv v16, v8, v16 ; RV32-V-NEXT: li a0, 61 diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll @@ -966,8 +966,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v9, v0.t @@ -992,8 +992,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v9 @@ -1044,8 +1044,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v10, v0.t @@ -1070,8 +1070,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vremu.vv v8, 
v8, v10 @@ -1122,8 +1122,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v12, v0.t @@ -1148,8 +1148,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v12 @@ -1200,8 +1200,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v16, v0.t @@ -1226,8 +1226,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll @@ -441,8 +441,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsub.vv v8, v9, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -478,8 +478,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsub.vv v8, v10, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -515,8 +515,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsub.vv v8, v12, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -552,8 +552,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsub.vv v8, v16, v8 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll @@ -985,8 +985,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsub.vv v8, v9, v8, v0.t @@ -1011,8 +1011,8 @@ ; RV32-NEXT: 
.cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsub.vv v8, v9, v8 @@ -1067,8 +1067,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsub.vv v8, v10, v8, v0.t @@ -1093,8 +1093,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsub.vv v8, v10, v8 @@ -1149,8 +1149,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsub.vv v8, v12, v8, v0.t @@ -1175,8 +1175,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsub.vv v8, v12, v8 @@ -1231,8 +1231,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsub.vv v8, v16, v8, v0.t @@ -1257,8 +1257,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsub.vv v8, v16, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub.ll @@ -843,8 +843,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsub.vv v8, v9, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -878,8 +878,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsub.vv v8, v10, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -913,8 +913,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; 
RV32-NEXT: vsub.vv v8, v10, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -948,8 +948,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsub.vv v8, v12, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -983,8 +983,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsub.vv v8, v12, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1018,8 +1018,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsub.vv v8, v16, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -1053,8 +1053,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsub.vv v8, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1088,8 +1088,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsub.vv v8, v24, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll @@ -1897,8 +1897,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1927,8 +1927,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -1956,8 +1956,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1986,8 +1986,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2015,8 +2015,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), 
zero ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2045,8 +2045,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2074,8 +2074,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll @@ -671,8 +671,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -720,8 +720,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -769,8 +769,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -818,8 +818,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll @@ -1897,8 +1897,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1927,8 +1927,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -1956,8 +1956,8 @@ ; 
CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1986,8 +1986,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2015,8 +2015,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2045,8 +2045,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2074,8 +2074,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll @@ -671,8 +671,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -720,8 +720,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -769,8 +769,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -818,8 +818,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v16 ; 
RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll @@ -974,8 +974,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: addi sp, sp, 16 @@ -1004,8 +1004,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0 ; CHECK-NEXT: addi sp, sp, 16 @@ -1034,8 +1034,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsbc.vvm v8, v8, v12, v0 ; CHECK-NEXT: addi sp, sp, 16 @@ -1064,8 +1064,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0 ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll @@ -664,8 +664,8 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: addi sp, sp, 16 @@ -705,8 +705,8 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 ; CHECK-NEXT: addi sp, sp, 16 @@ -746,8 +746,8 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 ; CHECK-NEXT: addi sp, sp, 16 @@ -787,8 +787,8 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll @@ -1897,8 +1897,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; 
CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1927,8 +1927,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -1956,8 +1956,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsmul.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1986,8 +1986,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2015,8 +2015,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsmul.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2045,8 +2045,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2074,8 +2074,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsmul.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll @@ -47,8 +47,8 @@ ; RV32V-NEXT: lui a0, 1028096 ; RV32V-NEXT: addi a0, a0, -1281 ; RV32V-NEXT: sw a0, 8(sp) -; RV32V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32V-NEXT: addi a0, sp, 8 +; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32V-NEXT: vlse64.v v8, (a0), zero ; RV32V-NEXT: addi sp, sp, 16 ; RV32V-NEXT: ret @@ -73,8 +73,8 @@ ; RV32V-NEXT: .cfi_def_cfa_offset 16 ; RV32V-NEXT: sw a1, 12(sp) ; RV32V-NEXT: sw a0, 8(sp) -; RV32V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32V-NEXT: addi a0, sp, 8 +; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32V-NEXT: vlse64.v v8, (a0), zero ; RV32V-NEXT: addi sp, sp, 16 ; RV32V-NEXT: ret @@ -157,8 +157,8 @@ ; RV32V-NEXT: lui a0, 1028096 ; RV32V-NEXT: addi a0, a0, -1281 ; RV32V-NEXT: sw a0, 8(sp) -; RV32V-NEXT: vsetvli 
a0, zero, e64, m8, ta, mu ; RV32V-NEXT: addi a0, sp, 8 +; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32V-NEXT: vlse64.v v16, (a0), zero ; RV32V-NEXT: vadd.vv v8, v8, v16 ; RV32V-NEXT: addi sp, sp, 16 @@ -188,8 +188,8 @@ ; RV32V-NEXT: lui a0, 1028096 ; RV32V-NEXT: addi a0, a0, -1281 ; RV32V-NEXT: sw a0, 8(sp) -; RV32V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32V-NEXT: addi a0, sp, 8 +; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32V-NEXT: vlse64.v v16, (a0), zero ; RV32V-NEXT: vadd.vv v8, v8, v16 ; RV32V-NEXT: addi sp, sp, 16 @@ -216,8 +216,8 @@ ; RV32V-NEXT: .cfi_def_cfa_offset 16 ; RV32V-NEXT: sw a1, 12(sp) ; RV32V-NEXT: sw a0, 8(sp) -; RV32V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32V-NEXT: addi a0, sp, 8 +; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32V-NEXT: vlse64.v v16, (a0), zero ; RV32V-NEXT: vadd.vv v8, v8, v16 ; RV32V-NEXT: addi sp, sp, 16 @@ -260,8 +260,8 @@ ; RV32V-NEXT: .cfi_def_cfa_offset 16 ; RV32V-NEXT: sw zero, 12(sp) ; RV32V-NEXT: sw a0, 8(sp) -; RV32V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32V-NEXT: addi a0, sp, 8 +; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32V-NEXT: vlse64.v v8, (a0), zero ; RV32V-NEXT: addi sp, sp, 16 ; RV32V-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll @@ -1897,8 +1897,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1927,8 +1927,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -1956,8 +1956,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1986,8 +1986,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2015,8 +2015,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2045,8 +2045,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2074,8 +2074,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; 
CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll @@ -689,8 +689,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -739,8 +739,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -789,8 +789,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -839,8 +839,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll @@ -1897,8 +1897,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1927,8 +1927,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -1956,8 +1956,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1986,8 +1986,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; 
CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2015,8 +2015,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2045,8 +2045,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2074,8 +2074,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll @@ -689,8 +689,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -739,8 +739,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -789,8 +789,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -839,8 +839,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll @@ -664,8 +664,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; 
RV32-NEXT: vsub.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -712,8 +712,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -760,8 +760,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -808,8 +808,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll @@ -999,8 +999,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v9, v0.t @@ -1025,8 +1025,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v9 @@ -1077,8 +1077,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v10, v0.t @@ -1103,8 +1103,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v10 @@ -1155,8 +1155,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v12, v0.t @@ -1181,8 +1181,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v12 @@ -1233,8 +1233,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, 
sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v16, v0.t @@ -1259,8 +1259,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub.ll b/llvm/test/CodeGen/RISCV/rvv/vsub.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub.ll @@ -1859,8 +1859,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1894,8 +1894,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsub.vv v8, v9, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -1929,8 +1929,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1964,8 +1964,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsub.vv v8, v10, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -1999,8 +1999,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2034,8 +2034,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsub.vv v8, v12, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 @@ -2069,8 +2069,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsub.vv v8, v16, v24, v0.t ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll @@ 
-1897,8 +1897,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1927,8 +1927,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -1956,8 +1956,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1986,8 +1986,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2015,8 +2015,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vxor.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2045,8 +2045,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 @@ -2074,8 +2074,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2104,8 +2104,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v24, (a0), zero ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll @@ -1081,8 +1081,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vxor.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1153,8 +1153,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vxor.vv 
v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1225,8 +1225,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vxor.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -1297,8 +1297,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vxor.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll @@ -1999,8 +1999,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v9, v0.t @@ -2025,8 +2025,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v9 @@ -2129,8 +2129,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v10, v0.t @@ -2155,8 +2155,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v10 @@ -2259,8 +2259,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v12, v0.t @@ -2285,8 +2285,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v12 @@ -2389,8 +2389,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v16, v0.t @@ -2415,8 +2415,8 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; 
RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; RV32-NEXT: vxor.vv v8, v8, v16
diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -599,15 +599,15 @@
 ;
 ; RV32MV-LABEL: test_srem_vec:
 ; RV32MV: # %bb.0:
-; RV32MV-NEXT: addi sp, sp, -96
-; RV32MV-NEXT: sw ra, 92(sp) # 4-byte Folded Spill
-; RV32MV-NEXT: sw s0, 88(sp) # 4-byte Folded Spill
-; RV32MV-NEXT: sw s2, 84(sp) # 4-byte Folded Spill
-; RV32MV-NEXT: sw s3, 80(sp) # 4-byte Folded Spill
-; RV32MV-NEXT: sw s4, 76(sp) # 4-byte Folded Spill
-; RV32MV-NEXT: sw s5, 72(sp) # 4-byte Folded Spill
-; RV32MV-NEXT: sw s6, 68(sp) # 4-byte Folded Spill
-; RV32MV-NEXT: addi s0, sp, 96
+; RV32MV-NEXT: addi sp, sp, -64
+; RV32MV-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; RV32MV-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
+; RV32MV-NEXT: sw s2, 52(sp) # 4-byte Folded Spill
+; RV32MV-NEXT: sw s3, 48(sp) # 4-byte Folded Spill
+; RV32MV-NEXT: sw s4, 44(sp) # 4-byte Folded Spill
+; RV32MV-NEXT: sw s5, 40(sp) # 4-byte Folded Spill
+; RV32MV-NEXT: sw s6, 36(sp) # 4-byte Folded Spill
+; RV32MV-NEXT: addi s0, sp, 64
 ; RV32MV-NEXT: andi sp, sp, -32
 ; RV32MV-NEXT: mv s2, a0
 ; RV32MV-NEXT: lw a0, 8(a0)
@@ -631,28 +631,28 @@
 ; RV32MV-NEXT: li a2, 6
 ; RV32MV-NEXT: li a3, 0
 ; RV32MV-NEXT: call __moddi3@plt
-; RV32MV-NEXT: sw a1, 36(sp)
-; RV32MV-NEXT: sw a0, 32(sp)
+; RV32MV-NEXT: sw a1, 4(sp)
+; RV32MV-NEXT: sw a0, 0(sp)
 ; RV32MV-NEXT: li a2, -5
 ; RV32MV-NEXT: li a3, -1
 ; RV32MV-NEXT: mv a0, s5
 ; RV32MV-NEXT: mv a1, s6
 ; RV32MV-NEXT: call __moddi3@plt
-; RV32MV-NEXT: sw a1, 52(sp)
-; RV32MV-NEXT: sw a0, 48(sp)
+; RV32MV-NEXT: sw a1, 20(sp)
+; RV32MV-NEXT: sw a0, 16(sp)
 ; RV32MV-NEXT: li a2, 7
 ; RV32MV-NEXT: mv a0, s3
 ; RV32MV-NEXT: mv a1, s4
 ; RV32MV-NEXT: li a3, 0
 ; RV32MV-NEXT: call __moddi3@plt
-; RV32MV-NEXT: sw a1, 44(sp)
-; RV32MV-NEXT: sw a0, 40(sp)
+; RV32MV-NEXT: sw a1, 12(sp)
+; RV32MV-NEXT: sw a0, 8(sp)
 ; RV32MV-NEXT: li a0, 85
 ; RV32MV-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
 ; RV32MV-NEXT: vmv.s.x v0, a0
 ; RV32MV-NEXT: vsetivli zero, 8, e32, m2, ta, mu
 ; RV32MV-NEXT: vmv.v.i v8, 1
-; RV32MV-NEXT: addi a0, sp, 32
+; RV32MV-NEXT: mv a0, sp
 ; RV32MV-NEXT: vle32.v v10, (a0)
 ; RV32MV-NEXT: lui a0, %hi(.LCPI3_0)
 ; RV32MV-NEXT: addi a0, a0, %lo(.LCPI3_0)
@@ -690,23 +690,23 @@
 ; RV32MV-NEXT: slli a0, a0, 2
 ; RV32MV-NEXT: or a0, a1, a0
 ; RV32MV-NEXT: sw a0, 8(s2)
-; RV32MV-NEXT: addi sp, s0, -96
-; RV32MV-NEXT: lw ra, 92(sp) # 4-byte Folded Reload
-; RV32MV-NEXT: lw s0, 88(sp) # 4-byte Folded Reload
-; RV32MV-NEXT: lw s2, 84(sp) # 4-byte Folded Reload
-; RV32MV-NEXT: lw s3, 80(sp) # 4-byte Folded Reload
-; RV32MV-NEXT: lw s4, 76(sp) # 4-byte Folded Reload
-; RV32MV-NEXT: lw s5, 72(sp) # 4-byte Folded Reload
-; RV32MV-NEXT: lw s6, 68(sp) # 4-byte Folded Reload
-; RV32MV-NEXT: addi sp, sp, 96
+; RV32MV-NEXT: addi sp, s0, -64
+; RV32MV-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; RV32MV-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
+; RV32MV-NEXT: lw s2, 52(sp) # 4-byte Folded Reload
+; RV32MV-NEXT: lw s3, 48(sp) # 4-byte Folded Reload
+; RV32MV-NEXT: lw s4, 44(sp) # 4-byte Folded Reload
+; RV32MV-NEXT: lw s5, 40(sp) # 4-byte Folded Reload
+; RV32MV-NEXT: lw s6, 36(sp) # 4-byte Folded Reload
+; RV32MV-NEXT: addi sp, sp, 64
 ; RV32MV-NEXT: ret
 ;
 ; RV64MV-LABEL: test_srem_vec:
 ; RV64MV: # %bb.0:
-; RV64MV-NEXT: addi sp, sp, -96
-; RV64MV-NEXT: sd ra, 88(sp) # 8-byte Folded Spill
-; RV64MV-NEXT: sd s0, 80(sp) # 8-byte Folded Spill
-; RV64MV-NEXT: addi s0, sp, 96
+; RV64MV-NEXT: addi sp, sp, -64
+; RV64MV-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64MV-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64MV-NEXT: addi s0, sp, 64
 ; RV64MV-NEXT: andi sp, sp, -32
 ; RV64MV-NEXT: lwu a1, 8(a0)
 ; RV64MV-NEXT: ld a2, 0(a0)
@@ -732,7 +732,7 @@
 ; RV64MV-NEXT: ld a5, %lo(.LCPI3_1)(a5)
 ; RV64MV-NEXT: srai a1, a1, 31
 ; RV64MV-NEXT: sub a2, a2, a4
-; RV64MV-NEXT: sd a2, 32(sp)
+; RV64MV-NEXT: sd a2, 0(sp)
 ; RV64MV-NEXT: mulh a2, a1, a5
 ; RV64MV-NEXT: srli a4, a2, 63
 ; RV64MV-NEXT: srai a2, a2, 1
@@ -742,7 +742,7 @@
 ; RV64MV-NEXT: ld a5, %lo(.LCPI3_2)(a5)
 ; RV64MV-NEXT: add a2, a4, a2
 ; RV64MV-NEXT: add a1, a1, a2
-; RV64MV-NEXT: sd a1, 48(sp)
+; RV64MV-NEXT: sd a1, 16(sp)
 ; RV64MV-NEXT: mulh a1, a3, a5
 ; RV64MV-NEXT: srli a2, a1, 63
 ; RV64MV-NEXT: srai a1, a1, 1
@@ -750,9 +750,9 @@
 ; RV64MV-NEXT: slli a2, a1, 3
 ; RV64MV-NEXT: sub a1, a1, a2
 ; RV64MV-NEXT: add a1, a3, a1
-; RV64MV-NEXT: sd a1, 40(sp)
+; RV64MV-NEXT: sd a1, 8(sp)
+; RV64MV-NEXT: mv a1, sp
 ; RV64MV-NEXT: vsetivli zero, 4, e64, m2, ta, mu
-; RV64MV-NEXT: addi a1, sp, 32
 ; RV64MV-NEXT: vle64.v v8, (a1)
 ; RV64MV-NEXT: lui a1, %hi(.LCPI3_3)
 ; RV64MV-NEXT: addi a1, a1, %lo(.LCPI3_3)
@@ -781,10 +781,10 @@
 ; RV64MV-NEXT: srli a2, a2, 62
 ; RV64MV-NEXT: or a1, a2, a1
 ; RV64MV-NEXT: sw a1, 8(a0)
-; RV64MV-NEXT: addi sp, s0, -96
-; RV64MV-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
-; RV64MV-NEXT: ld s0, 80(sp) # 8-byte Folded Reload
-; RV64MV-NEXT: addi sp, sp, 96
+; RV64MV-NEXT: addi sp, s0, -64
+; RV64MV-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64MV-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64MV-NEXT: addi sp, sp, 64
 ; RV64MV-NEXT: ret
 %ld = load <3 x i33>, <3 x i33>* %X
 %srem = srem <3 x i33> %ld,
diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
--- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -544,8 +544,8 @@
 ; RV32MV-NEXT: or a1, a1, a2
 ; RV32MV-NEXT: andi a1, a1, 2047
 ; RV32MV-NEXT: sh a1, 12(sp)
-; RV32MV-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
 ; RV32MV-NEXT: addi a1, sp, 8
+; RV32MV-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
 ; RV32MV-NEXT: vle16.v v8, (a1)
 ; RV32MV-NEXT: vmv.v.i v9, 10
 ; RV32MV-NEXT: li a1, 9
@@ -609,8 +609,8 @@
 ; RV64MV-NEXT: slli a1, a1, 42
 ; RV64MV-NEXT: srli a1, a1, 53
 ; RV64MV-NEXT: sh a1, 10(sp)
-; RV64MV-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
 ; RV64MV-NEXT: addi a1, sp, 8
+; RV64MV-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
 ; RV64MV-NEXT: vle16.v v8, (a1)
 ; RV64MV-NEXT: vmv.v.i v9, 10
 ; RV64MV-NEXT: li a1, 9