diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -382,39 +382,61 @@
                                      [])));
 }
 
-class IndexSet<int index, int nf, int lmul> {
+// Collect the valid indexes into 'R' for the given TUPLE_INDEX, NF and LMUL values.
+// When NF = 2, the valid TUPLE_INDEX values are 0 and 1.
+// For example, when LMUL = 4, the potential valid indexes are
+// [8, 12, 16, 20, 24, 28, 4]. However, not all of these indexes are valid under
+// NF = 2. For example, 28 is not valid under LMUL = 4, NF = 2 and TUPLE_INDEX = 0,
+// because (0 + 7) x 4 = 28 exceeds the bound of 24 given by the filter below.
+// The filter is
+//   (tuple_index + i) x lmul <= (tuple_index x lmul) + 32 - (nf x lmul)
+//
+// Use TUPLE_INDEX = 0, LMUL = 4 and NF = 2 as the example,
+//   i x 4 <= 24
+// The class will return [8, 12, 16, 20, 24, 4].
+// Use TUPLE_INDEX = 1, LMUL = 4 and NF = 2 as the example,
+//   (1 + i) x 4 <= 28
+// The class will return [12, 16, 20, 24, 28, 8].
+//
+class IndexSet<int tuple_index, int nf, int lmul, bit isV0 = false> {
   list<int> R = !foldl([]<int>,
-                       [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
-                        13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
-                        23, 24, 25, 26, 27, 28, 29, 30, 31],
+                       !if(isV0, [0],
+                         !cond(
+                           !eq(lmul, 1):
+                             [8, 9, 10, 11, 12, 13, 14, 15,
+                              16, 17, 18, 19, 20, 21, 22, 23,
+                              24, 25, 26, 27, 28, 29, 30, 31,
+                              1, 2, 3, 4, 5, 6, 7],
+                           !eq(lmul, 2):
+                             [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3],
+                           !eq(lmul, 4):
+                             [2, 3, 4, 5, 6, 7, 1])),
                        L, i,
                        !listconcat(L,
-                                   !if(!and(
-                                     !le(!mul(index, lmul), !mul(i, lmul)),
-                                     !le(!mul(i, lmul),
-                                         !sub(!add(32, !mul(index, lmul)), !mul(nf, lmul)))
-                                   ), [!mul(i, lmul)], [])));
+                                   !if(!le(!mul(!add(i, tuple_index), lmul),
+                                           !sub(!add(32, !mul(tuple_index, lmul)), !mul(nf, lmul))),
+                                       [!mul(!add(i, tuple_index), lmul)], [])));
 }
 
-class VRegList<list<dag> LIn, int start, int nf, int lmul, bit NoV0> {
+// This class returns a list of vector register collections.
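+// Each collection gives the allocation order for one field of an NF-field
+// tuple: the k-th collection lists the registers usable as field k.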
+// For example, for NF = 2 and LMUL = 4,
+// it will return
+//   ([ V8M4, V12M4, V16M4, V20M4, V24M4, V4M4],
+//    [V12M4, V16M4, V20M4, V24M4, V28M4, V8M4])
+//
+class VRegList<list<dag> LIn, int start, int nf, int lmul, bit isV0> {
   list<dag> L =
     !if(!ge(start, nf),
         LIn,
         !listconcat(
           [!dag(add,
-                !foreach(i,
-                  !if(NoV0,
-                    !tail(IndexSet<start, nf, lmul>.R),
-                    [!head(IndexSet<start, nf, lmul>.R)]),
+                !foreach(i, IndexSet<start, nf, lmul, isV0>.R,
                   !cast<Register>("V" # i # !cond(!eq(lmul, 2): "M2",
                                                   !eq(lmul, 4): "M4",
                                                   true: ""))),
                 !listsplat("",
-                           !if(NoV0,
-                             !size(!tail(IndexSet<start, nf, lmul>.R)),
-                             !size([!head(IndexSet<start, nf, lmul>.R)]))))],
-          VRegList<LIn, !add(start, 1), nf, lmul, NoV0>.L));
+                           !size(IndexSet<start, nf, lmul, isV0>.R)))],
+          VRegList<LIn, !add(start, 1), nf, lmul, isV0>.L));
 }
 
 // Vector registers
@@ -464,10 +486,10 @@
     foreach n = NFList<m>.L in {
       def "VN" # n # "M" # m # "NoV0": RegisterTuples<
                                          SubRegSet<[], 0, n, m>.L,
-                                         VRegList<[], 0, n, m, 1>.L>;
+                                         VRegList<[], 0, n, m, false>.L>;
       def "VN" # n # "M" # m # "V0" : RegisterTuples<
                                         SubRegSet<[], 0, n, m>.L,
-                                        VRegList<[], 0, n, m, 0>.L>;
+                                        VRegList<[], 0, n, m, true>.L>;
     }
   }
@@ -541,12 +563,13 @@
 foreach m = LMULList.m in {
   foreach nf = NFList<m>.L in {
-    def "VRN" # nf # "M" # m: VReg<[untyped],
-             (add !cast<RegisterClass>("VN" # nf # "M" # m # "V0"), !cast<RegisterClass>("VN" # nf # "M" # m # "NoV0")),
-             !mul(nf, m)>;
     def "VRN" # nf # "M" # m # "NoV0": VReg<[untyped],
              (add !cast<RegisterClass>("VN" # nf # "M" # m # "NoV0")),
              !mul(nf, m)>;
+    def "VRN" # nf # "M" # m: VReg<[untyped],
+             (add !cast<RegisterClass>("VN" # nf # "M" # m # "NoV0"),
+                  !cast<RegisterClass>("VN" # nf # "M" # m # "V0")),
+             !mul(nf, m)>;
   }
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll b/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll
--- a/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll
@@ -9,9 +9,10 @@
 define void @test_vsseg2_mask_nxv16i16( %val, i16* %base, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv4r.v v4, v8
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
+; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsseg2e16.v v4, (a0), v0.t
+; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsseg2.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
@@ -11,8 +11,8 @@
 ; SPILL-O0-NEXT:    csrr a2, vlenb
 ; SPILL-O0-NEXT:    sub sp, sp, a2
 ; SPILL-O0-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; SPILL-O0-NEXT:    vlseg2e32.v v0, (a0)
-; SPILL-O0-NEXT:    vmv1r.v v8, v1
+; SPILL-O0-NEXT:    vlseg2e32.v v8, (a0)
+; SPILL-O0-NEXT:    vmv1r.v v8, v9
 ; SPILL-O0-NEXT:    addi a0, sp, 16
 ; SPILL-O0-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O0-NEXT:    #APP
@@ -31,12 +31,12 @@
 ; SPILL-O2-NEXT:    slli a2, a2, 1
 ; SPILL-O2-NEXT:    sub sp, sp, a2
 ; SPILL-O2-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; SPILL-O2-NEXT:    vlseg2e32.v v0, (a0)
+; SPILL-O2-NEXT:    vlseg2e32.v v8, (a0)
 ; SPILL-O2-NEXT:    addi a0, sp, 16
 ; SPILL-O2-NEXT:    csrr a1, vlenb
-; SPILL-O2-NEXT:    vs1r.v v0, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    add a0, a0, a1
-; SPILL-O2-NEXT:    vs1r.v v1, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs1r.v v9, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    #APP
 ; SPILL-O2-NEXT:    #NO_APP
 ; SPILL-O2-NEXT:    addi a0, sp, 16
@@ -65,8 +65,8 @@
 ; SPILL-O0-NEXT:    csrr a2, vlenb
 ; SPILL-O0-NEXT:    sub sp, sp, a2
 ; SPILL-O0-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; SPILL-O0-NEXT:    vlseg2e32.v v0, (a0)
-; SPILL-O0-NEXT:    vmv1r.v v8, v1
+; SPILL-O0-NEXT:    vlseg2e32.v v8, (a0)
+; SPILL-O0-NEXT:    vmv1r.v v8, v9
 ; SPILL-O0-NEXT:    addi a0, sp, 16
 ; SPILL-O0-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O0-NEXT:    #APP
@@ -85,12 +85,12 @@
 ; SPILL-O2-NEXT:    slli a2, a2, 1
 ; SPILL-O2-NEXT:    sub sp, sp, a2
 ; SPILL-O2-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; SPILL-O2-NEXT:    vlseg2e32.v v0, (a0)
+; SPILL-O2-NEXT:    vlseg2e32.v v8, (a0)
 ; SPILL-O2-NEXT:    addi a0, sp, 16
 ; SPILL-O2-NEXT:    csrr a1, vlenb
-; SPILL-O2-NEXT:    vs1r.v v0, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    add a0, a0, a1
-; SPILL-O2-NEXT:    vs1r.v v1, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs1r.v v9, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    #APP
 ; SPILL-O2-NEXT:    #NO_APP
 ; SPILL-O2-NEXT:    addi a0, sp, 16
@@ -120,8 +120,8 @@
 ; SPILL-O0-NEXT:    slli a2, a2, 1
 ; SPILL-O0-NEXT:    sub sp, sp, a2
 ; SPILL-O0-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; SPILL-O0-NEXT:    vlseg2e32.v v0, (a0)
-; SPILL-O0-NEXT:    vmv2r.v v8, v2
+; SPILL-O0-NEXT:    vlseg2e32.v v8, (a0)
+; SPILL-O0-NEXT:    vmv2r.v v8, v10
 ; SPILL-O0-NEXT:    addi a0, sp, 16
 ; SPILL-O0-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O0-NEXT:    #APP
@@ -141,13 +141,13 @@
 ; SPILL-O2-NEXT:    slli a2, a2, 2
 ; SPILL-O2-NEXT:    sub sp, sp, a2
 ; SPILL-O2-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; SPILL-O2-NEXT:    vlseg2e32.v v0, (a0)
+; SPILL-O2-NEXT:    vlseg2e32.v v8, (a0)
 ; SPILL-O2-NEXT:    addi a0, sp, 16
 ; SPILL-O2-NEXT:    csrr a1, vlenb
 ; SPILL-O2-NEXT:    slli a1, a1, 1
-; SPILL-O2-NEXT:    vs2r.v v0, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    add a0, a0, a1
-; SPILL-O2-NEXT:    vs2r.v v2, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    #APP
 ; SPILL-O2-NEXT:    #NO_APP
 ; SPILL-O2-NEXT:    addi a0, sp, 16
@@ -178,8 +178,8 @@
 ; SPILL-O0-NEXT:    slli a2, a2, 2
 ; SPILL-O0-NEXT:    sub sp, sp, a2
 ; SPILL-O0-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; SPILL-O0-NEXT:    vlseg2e32.v v0, (a0)
-; SPILL-O0-NEXT:    vmv4r.v v8, v4
+; SPILL-O0-NEXT:    vlseg2e32.v v8, (a0)
+; SPILL-O0-NEXT:    vmv4r.v v8, v12
 ; SPILL-O0-NEXT:    addi a0, sp, 16
 ; SPILL-O0-NEXT:    vs4r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O0-NEXT:    #APP
@@ -199,13 +199,13 @@
 ; SPILL-O2-NEXT:    slli a2, a2, 3
 ; SPILL-O2-NEXT:    sub sp, sp, a2
 ; SPILL-O2-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; SPILL-O2-NEXT:    vlseg2e32.v v0, (a0)
+; SPILL-O2-NEXT:    vlseg2e32.v v8, (a0)
 ; SPILL-O2-NEXT:    addi a0, sp, 16
 ; SPILL-O2-NEXT:    csrr a1, vlenb
 ; SPILL-O2-NEXT:    slli a1, a1, 2
-; SPILL-O2-NEXT:    vs4r.v v0, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs4r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    add a0, a0, a1
-; SPILL-O2-NEXT:    vs4r.v v4, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs4r.v v12, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    #APP
 ; SPILL-O2-NEXT:    #NO_APP
 ; SPILL-O2-NEXT:    addi a0, sp, 16
@@ -236,8 +236,8 @@
 ; SPILL-O0-NEXT:    slli a2, a2, 1
 ; SPILL-O0-NEXT:    sub sp, sp, a2
 ; SPILL-O0-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; SPILL-O0-NEXT:    vlseg3e32.v v0, (a0)
-; SPILL-O0-NEXT:    vmv2r.v v8, v2
+; SPILL-O0-NEXT:    vlseg3e32.v v8, (a0)
+; SPILL-O0-NEXT:    vmv2r.v v8, v10
 ; SPILL-O0-NEXT:    addi a0, sp, 16
 ; SPILL-O0-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O0-NEXT:    #APP
@@ -258,15 +258,15 @@
 ; SPILL-O2-NEXT:    mul a2, a2, a3
 ; SPILL-O2-NEXT:    sub sp, sp, a2
 ; SPILL-O2-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; SPILL-O2-NEXT:    vlseg3e32.v v0, (a0)
+; SPILL-O2-NEXT:    vlseg3e32.v v8, (a0)
 ; SPILL-O2-NEXT:    addi a0, sp, 16
 ; SPILL-O2-NEXT:    csrr a1, vlenb
 ; SPILL-O2-NEXT:    slli a1, a1, 1
-; SPILL-O2-NEXT:    vs2r.v v0, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    add a0, a0, a1
-; SPILL-O2-NEXT:    vs2r.v v2, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    add a0, a0, a1
-; SPILL-O2-NEXT:    vs2r.v v4, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs2r.v v12, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    #APP
 ; SPILL-O2-NEXT:    #NO_APP
 ; SPILL-O2-NEXT:    addi a0, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
--- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
@@ -11,8 +11,8 @@
 ; SPILL-O0-NEXT:    csrr a2, vlenb
 ; SPILL-O0-NEXT:    sub sp, sp, a2
 ; SPILL-O0-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; SPILL-O0-NEXT:    vlseg2e32.v v0, (a0)
-; SPILL-O0-NEXT:    vmv1r.v v8, v1
+; SPILL-O0-NEXT:    vlseg2e32.v v8, (a0)
+; SPILL-O0-NEXT:    vmv1r.v v8, v9
 ; SPILL-O0-NEXT:    addi a0, sp, 16
 ; SPILL-O0-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O0-NEXT:    #APP
@@ -31,12 +31,12 @@
 ; SPILL-O2-NEXT:    slli a2, a2, 1
 ; SPILL-O2-NEXT:    sub sp, sp, a2
 ; SPILL-O2-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; SPILL-O2-NEXT:    vlseg2e32.v v0, (a0)
+; SPILL-O2-NEXT:    vlseg2e32.v v8, (a0)
 ; SPILL-O2-NEXT:    addi a0, sp, 16
 ; SPILL-O2-NEXT:    csrr a1, vlenb
-; SPILL-O2-NEXT:    vs1r.v v0, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    add a0, a0, a1
-; SPILL-O2-NEXT:    vs1r.v v1, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs1r.v v9, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    #APP
 ; SPILL-O2-NEXT:    #NO_APP
 ; SPILL-O2-NEXT:    addi a0, sp, 16
@@ -65,8 +65,8 @@
 ; SPILL-O0-NEXT:    csrr a2, vlenb
 ; SPILL-O0-NEXT:    sub sp, sp, a2
 ; SPILL-O0-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; SPILL-O0-NEXT:    vlseg2e32.v v0, (a0)
-; SPILL-O0-NEXT:    vmv1r.v v8, v1
+; SPILL-O0-NEXT:    vlseg2e32.v v8, (a0)
+; SPILL-O0-NEXT:    vmv1r.v v8, v9
 ; SPILL-O0-NEXT:    addi a0, sp, 16
 ; SPILL-O0-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O0-NEXT:    #APP
@@ -85,12 +85,12 @@
 ; SPILL-O2-NEXT:    slli a2, a2, 1
 ; SPILL-O2-NEXT:    sub sp, sp, a2
 ; SPILL-O2-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; SPILL-O2-NEXT:    vlseg2e32.v v0, (a0)
+; SPILL-O2-NEXT:    vlseg2e32.v v8, (a0)
 ; SPILL-O2-NEXT:    addi a0, sp, 16
 ; SPILL-O2-NEXT:    csrr a1, vlenb
-; SPILL-O2-NEXT:    vs1r.v v0, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    add a0, a0, a1
-; SPILL-O2-NEXT:    vs1r.v v1, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs1r.v v9, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    #APP
 ; SPILL-O2-NEXT:    #NO_APP
 ; SPILL-O2-NEXT:    addi a0, sp, 16
@@ -120,8 +120,8 @@
 ; SPILL-O0-NEXT:    slli a2, a2, 1
 ; SPILL-O0-NEXT:    sub sp, sp, a2
 ; SPILL-O0-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; SPILL-O0-NEXT:    vlseg2e32.v v0, (a0)
-; SPILL-O0-NEXT:    vmv2r.v v8, v2
+; SPILL-O0-NEXT:    vlseg2e32.v v8, (a0)
+; SPILL-O0-NEXT:    vmv2r.v v8, v10
 ; SPILL-O0-NEXT:    addi a0, sp, 16
 ; SPILL-O0-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O0-NEXT:    #APP
@@ -141,13 +141,13 @@
 ; SPILL-O2-NEXT:    slli a2, a2, 2
 ; SPILL-O2-NEXT:    sub sp, sp, a2
 ; SPILL-O2-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; SPILL-O2-NEXT:    vlseg2e32.v v0, (a0)
+; SPILL-O2-NEXT:    vlseg2e32.v v8, (a0)
 ; SPILL-O2-NEXT:    addi a0, sp, 16
 ; SPILL-O2-NEXT:    csrr a1, vlenb
 ; SPILL-O2-NEXT:    slli a1, a1, 1
-; SPILL-O2-NEXT:    vs2r.v v0, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    add a0, a0, a1
-; SPILL-O2-NEXT:    vs2r.v v2, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    #APP
 ; SPILL-O2-NEXT:    #NO_APP
 ; SPILL-O2-NEXT:    addi a0, sp, 16
@@ -178,8 +178,8 @@
 ; SPILL-O0-NEXT:    slli a2, a2, 2
 ; SPILL-O0-NEXT:    sub sp, sp, a2
 ; SPILL-O0-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; SPILL-O0-NEXT:    vlseg2e32.v v0, (a0)
-; SPILL-O0-NEXT:    vmv4r.v v8, v4
+; SPILL-O0-NEXT:    vlseg2e32.v v8, (a0)
+; SPILL-O0-NEXT:    vmv4r.v v8, v12
 ; SPILL-O0-NEXT:    addi a0, sp, 16
 ; SPILL-O0-NEXT:    vs4r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O0-NEXT:    #APP
@@ -199,13 +199,13 @@
 ; SPILL-O2-NEXT:    slli a2, a2, 3
 ; SPILL-O2-NEXT:    sub sp, sp, a2
 ; SPILL-O2-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; SPILL-O2-NEXT:    vlseg2e32.v v0, (a0)
+; SPILL-O2-NEXT:    vlseg2e32.v v8, (a0)
 ; SPILL-O2-NEXT:    addi a0, sp, 16
 ; SPILL-O2-NEXT:    csrr a1, vlenb
 ; SPILL-O2-NEXT:    slli a1, a1, 2
-; SPILL-O2-NEXT:    vs4r.v v0, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs4r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    add a0, a0, a1
-; SPILL-O2-NEXT:    vs4r.v v4, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs4r.v v12, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    #APP
 ; SPILL-O2-NEXT:    #NO_APP
 ; SPILL-O2-NEXT:    addi a0, sp, 16
@@ -236,8 +236,8 @@
 ; SPILL-O0-NEXT:    slli a2, a2, 1
 ; SPILL-O0-NEXT:    sub sp, sp, a2
 ; SPILL-O0-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; SPILL-O0-NEXT:    vlseg3e32.v v0, (a0)
-; SPILL-O0-NEXT:    vmv2r.v v8, v2
+; SPILL-O0-NEXT:    vlseg3e32.v v8, (a0)
+; SPILL-O0-NEXT:    vmv2r.v v8, v10
 ; SPILL-O0-NEXT:    addi a0, sp, 16
 ; SPILL-O0-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O0-NEXT:    #APP
@@ -258,15 +258,15 @@
 ; SPILL-O2-NEXT:    mul a2, a2, a3
 ; SPILL-O2-NEXT:    sub sp, sp, a2
 ; SPILL-O2-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; SPILL-O2-NEXT:    vlseg3e32.v v0, (a0)
+; SPILL-O2-NEXT:    vlseg3e32.v v8, (a0)
 ; SPILL-O2-NEXT:    addi a0, sp, 16
 ; SPILL-O2-NEXT:    csrr a1, vlenb
 ; SPILL-O2-NEXT:    slli a1, a1, 1
-; SPILL-O2-NEXT:    vs2r.v v0, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    add a0, a0, a1
-; SPILL-O2-NEXT:    vs2r.v v2, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    add a0, a0, a1
-; SPILL-O2-NEXT:    vs2r.v v4, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT:    vs2r.v v12, (a0) # Unknown-size Folded Spill
 ; SPILL-O2-NEXT:    #APP
 ; SPILL-O2-NEXT:    #NO_APP
 ; SPILL-O2-NEXT:    addi a0, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll
@@ -9,8 +9,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8
+; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16* %base, %index, i32 %vl)
@@ -39,8 +39,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vloxseg2ei8.v v12, (a0), v8
+; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(i16* %base, %index, i32 %vl)
@@ -69,8 +69,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vloxseg2ei32.v v16, (a0), v8
+; CHECK-NEXT:    vmv4r.v v8, v20
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(i16* %base, %index, i32 %vl)
@@ -99,8 +99,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl)
@@ -129,8 +129,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl)
@@ -159,8 +159,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl)
@@ -189,8 +189,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl)
@@ -201,12 +201,12 @@
 define @test_vloxseg3_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -221,8 +221,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg3ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl)
@@ -233,12 +233,12 @@
 define @test_vloxseg3_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -253,8 +253,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl)
@@ -265,12 +265,12 @@
 define @test_vloxseg3_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -285,8 +285,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl)
@@ -297,13 +297,13 @@
 define @test_vloxseg4_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -318,8 +318,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl)
@@ -330,13 +330,13 @@
 define @test_vloxseg4_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -351,8 +351,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl)
@@ -363,13 +363,13 @@
 define @test_vloxseg4_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -384,8 +384,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl)
@@ -396,14 +396,14 @@
 define @test_vloxseg5_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -418,8 +418,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg5ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl)
@@ -430,14 +430,14 @@
 define @test_vloxseg5_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -452,8 +452,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl)
@@ -464,14 +464,14 @@
 define @test_vloxseg5_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -486,8 +486,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl)
@@ -498,15 +498,15 @@
 define @test_vloxseg6_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -521,8 +521,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl)
@@ -533,15 +533,15 @@
 define @test_vloxseg6_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -556,8 +556,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl)
@@ -568,15 +568,15 @@
 define @test_vloxseg6_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -591,8 +591,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl)
@@ -603,16 +603,16 @@
 define @test_vloxseg7_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -627,8 +627,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl)
@@ -639,16 +639,16 @@
 define @test_vloxseg7_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -663,8 +663,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl)
@@ -675,16 +675,16 @@
 define @test_vloxseg7_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -699,8 +699,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl)
@@ -736,8 +736,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl)
@@ -773,8 +773,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl)
@@ -810,8 +810,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl)
@@ -840,8 +840,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg2ei8.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl)
@@ -870,8 +870,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg2ei32.v v16, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v18
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl)
@@ -900,8 +900,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei16.v v12, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl)
@@ -931,8 +931,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl)
@@ -943,12 +943,12 @@
 define @test_vloxseg3_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v12
+; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vloxseg3ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -963,8 +963,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei32.v v16, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v18
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl)
@@ -994,8 +994,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei16.v v12, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl)
@@ -1027,8 +1027,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl)
@@ -1060,8 +1060,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei32.v v16, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v18
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl)
@@ -1092,8 +1092,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl)
@@ -1122,8 +1122,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl)
@@ -1152,8 +1152,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl)
@@ -1182,8 +1182,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg3ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl)
@@ -1194,12 +1194,12 @@
 define @test_vloxseg3_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1214,8 +1214,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl)
@@ -1226,12 +1226,12 @@
 define @test_vloxseg3_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1246,8 +1246,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl)
@@ -1258,12 +1258,12 @@
 define @test_vloxseg3_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1278,8 +1278,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl)
@@ -1290,13 +1290,13 @@
 define @test_vloxseg4_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1311,8 +1311,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl)
@@ -1323,13 +1323,13 @@
 define @test_vloxseg4_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1344,8 +1344,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl)
@@ -1356,13 +1356,13 @@
 define @test_vloxseg4_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1377,8 +1377,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg5ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl)
@@ -1389,14 +1389,14 @@
 define @test_vloxseg5_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1411,8 +1411,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl)
@@ -1423,14 +1423,14 @@
 define @test_vloxseg5_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1445,8 +1445,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl)
@@ -1457,14 +1457,14 @@
 define @test_vloxseg5_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1479,8 +1479,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl)
@@ -1491,15 +1491,15 @@
 define @test_vloxseg6_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1514,8 +1514,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl)
@@ -1526,15 +1526,15 @@
 define @test_vloxseg6_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1549,8 +1549,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl)
@@ -1561,15 +1561,15 @@
 define @test_vloxseg6_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1584,8 +1584,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl)
@@ -1596,16 +1596,16 @@
 define @test_vloxseg7_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1620,8 +1620,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl)
@@ -1632,16 +1632,16 @@
 define @test_vloxseg7_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1656,8 +1656,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl)
@@ -1668,16 +1668,16 @@
 define @test_vloxseg7_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1692,8 +1692,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl)
@@ -1729,8 +1729,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl)
@@ -1766,8 +1766,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl)
@@ -1803,8 +1803,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl)
@@ -1833,8 +1833,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl)
@@ -1863,8 +1863,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg2ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl)
@@ -1893,8 +1893,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl)
@@ -1905,12 +1905,12 @@
 define @test_vloxseg3_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ;
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -1925,8 +1925,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -1937,12 +1937,12 @@ define @test_vloxseg3_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -1957,8 +1957,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -1988,8 +1988,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2000,13 +2000,13 @@ define @test_vloxseg4_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2021,8 +2021,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2033,13 +2033,13 @@ define @test_vloxseg4_mask_nxv4i16_nxv4i8( %val, 
i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2054,8 +2054,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2066,13 +2066,13 @@ define @test_vloxseg4_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2087,8 +2087,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2099,14 +2099,14 @@ define @test_vloxseg5_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2121,8 +2121,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2133,14 +2133,14 @@ define @test_vloxseg5_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2155,8 +2155,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2167,14 +2167,14 @@ define @test_vloxseg5_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2189,8 +2189,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2201,15 +2201,15 @@ define @test_vloxseg6_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ 
-2224,8 +2224,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2236,15 +2236,15 @@ define @test_vloxseg6_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2259,8 +2259,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2271,19 +2271,19 @@ define @test_vloxseg6_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) - %1 = extractvalue {,,,,,} %0, 1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) + %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2294,8 +2294,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2306,16 +2306,16 @@ define @test_vloxseg7_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2330,8 +2330,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2342,16 +2342,16 @@ define @test_vloxseg7_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2366,8 +2366,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2378,16 +2378,16 @@ define @test_vloxseg7_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2402,8 +2402,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2439,8 +2439,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2476,8 +2476,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2513,8 +2513,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -2543,8 +2543,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ -2573,8 +2573,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -2603,8 +2603,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -2615,12 +2615,12 @@ define @test_vloxseg3_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail 
call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2635,8 +2635,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ -2647,12 +2647,12 @@ define @test_vloxseg3_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2667,8 +2667,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -2679,12 +2679,12 @@ define @test_vloxseg3_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2699,8 +2699,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -2711,13 +2711,13 @@ define @test_vloxseg4_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, 
%index, %mask, i32 %vl, i32 1) @@ -2732,8 +2732,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ -2744,13 +2744,13 @@ define @test_vloxseg4_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2765,8 +2765,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -2777,13 +2777,13 @@ define @test_vloxseg4_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2798,8 +2798,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -2810,14 +2810,14 @@ define @test_vloxseg5_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2832,8 +2832,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ -2844,14 +2844,14 @@ define @test_vloxseg5_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2866,8 +2866,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -2878,14 +2878,14 @@ define @test_vloxseg5_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2900,8 +2900,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -2912,15 +2912,15 @@ define @test_vloxseg6_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; 
CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2935,8 +2935,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ -2947,15 +2947,15 @@ define @test_vloxseg6_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2970,8 +2970,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -2982,15 +2982,15 @@ define @test_vloxseg6_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -3005,8 +3005,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -3017,16 +3017,16 @@ define @test_vloxseg7_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -3041,8 +3041,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ -3053,16 +3053,16 @@ define @test_vloxseg7_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -3077,8 +3077,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -3089,16 +3089,16 @@ define @test_vloxseg7_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, 
ta, mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -3113,8 +3113,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -3150,8 +3150,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ -3187,8 +3187,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -3224,8 +3224,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) @@ -3254,8 +3254,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) @@ -3284,8 +3284,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) @@ -3314,8 +3314,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) @@ -3326,12 +3326,12 @@ define @test_vloxseg3_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: 
vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -3346,8 +3346,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) @@ -3358,12 +3358,12 @@ define @test_vloxseg3_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -3378,8 +3378,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) @@ -3409,8 +3409,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) @@ -3442,8 +3442,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) @@ -3475,8 +3475,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) @@ -3508,8 +3508,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3538,8 +3538,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i8: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3568,8 +3568,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3598,8 +3598,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3629,8 +3629,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3641,12 +3641,12 @@ define @test_vloxseg3_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3661,8 +3661,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3692,8 +3692,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3704,13 +3704,13 @@ define @test_vloxseg4_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v10, v0.t -; 
CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3725,8 +3725,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3737,13 +3737,13 @@ define @test_vloxseg4_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3758,8 +3758,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3790,8 +3790,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3802,14 +3802,14 @@ define @test_vloxseg5_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3824,8 +3824,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3836,14 +3836,14 @@ define 
@test_vloxseg5_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3858,8 +3858,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3891,8 +3891,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3903,15 +3903,15 @@ define @test_vloxseg6_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3926,8 +3926,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3938,15 +3938,15 @@ define @test_vloxseg6_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, 
m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3961,8 +3961,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3973,15 +3973,15 @@ define @test_vloxseg6_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3996,8 +3996,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -4008,16 +4008,16 @@ define @test_vloxseg7_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4032,8 +4032,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -4044,16 +4044,16 @@ define @test_vloxseg7_mask_nxv8i8_nxv8i8( %val, i8* 
%base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4068,8 +4068,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -4080,16 +4080,16 @@ define @test_vloxseg7_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4104,8 +4104,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -4141,8 +4141,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -4178,8 +4178,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -4215,8 +4215,8 @@ ; 
CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16(i32* %base, %index, i32 %vl) @@ -4245,8 +4245,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8(i32* %base, %index, i32 %vl) @@ -4275,8 +4275,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32(i32* %base, %index, i32 %vl) @@ -4305,8 +4305,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4335,8 +4335,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4365,8 +4365,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4395,8 +4395,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4407,12 +4407,12 @@ define @test_vloxseg3_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4427,8 +4427,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i8: 
; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4439,12 +4439,12 @@ define @test_vloxseg3_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4459,8 +4459,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4490,8 +4490,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4502,13 +4502,13 @@ define @test_vloxseg4_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4523,8 +4523,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4535,13 +4535,13 @@ define @test_vloxseg4_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v1, 
(a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4556,8 +4556,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4568,13 +4568,13 @@ define @test_vloxseg4_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4589,8 +4589,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4601,17 +4601,17 @@ define @test_vloxseg5_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4623,8 +4623,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4635,14 +4635,14 @@ define @test_vloxseg5_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i8: 
; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4657,8 +4657,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4669,14 +4669,14 @@ define @test_vloxseg5_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4691,8 +4691,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4703,15 +4703,15 @@ define @test_vloxseg6_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4726,8 +4726,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei8.v v9, 
(a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4738,15 +4738,15 @@ define @test_vloxseg6_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4761,8 +4761,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4773,15 +4773,15 @@ define @test_vloxseg6_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4796,8 +4796,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4808,16 +4808,16 @@ define @test_vloxseg7_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: 
vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4832,8 +4832,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4844,16 +4844,16 @@ define @test_vloxseg7_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4868,8 +4868,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4880,16 +4880,16 @@ define @test_vloxseg7_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4904,8 +4904,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4941,8 +4941,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i8: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4978,8 +4978,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -5015,8 +5015,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5045,8 +5045,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5075,8 +5075,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5105,8 +5105,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5117,12 +5117,12 @@ define @test_vloxseg3_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5137,8 +5137,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5149,12 +5149,12 @@ define @test_vloxseg3_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 
%vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5169,8 +5169,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5181,12 +5181,12 @@ define @test_vloxseg3_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5201,8 +5201,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5213,13 +5213,13 @@ define @test_vloxseg4_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5234,8 +5234,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5246,13 +5246,13 @@ define @test_vloxseg4_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5267,8 +5267,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5279,13 +5279,13 @@ define @test_vloxseg4_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5300,8 +5300,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5312,14 +5312,14 @@ define @test_vloxseg5_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5334,8 +5334,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5346,14 +5346,14 @@ define 
@test_vloxseg5_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5368,8 +5368,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5380,14 +5380,14 @@ define @test_vloxseg5_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5402,8 +5402,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5414,15 +5414,15 @@ define @test_vloxseg6_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5437,8 +5437,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5449,15 +5449,15 @@ define @test_vloxseg6_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5472,8 +5472,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5484,15 +5484,15 @@ define @test_vloxseg6_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5507,8 +5507,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5519,16 +5519,16 @@ define @test_vloxseg7_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; 
CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5543,8 +5543,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5555,16 +5555,16 @@ define @test_vloxseg7_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5579,8 +5579,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5591,16 +5591,16 @@ define @test_vloxseg7_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5615,8 +5615,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v 
v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl)
@@ -5652,8 +5652,8 @@
; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl)
@@ -5689,8 +5689,8 @@
; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl)
@@ -5726,8 +5726,8 @@
; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v20
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16(i8* %base, %index, i32 %vl)
@@ -5756,8 +5756,8 @@
; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8(i8* %base, %index, i32 %vl)
@@ -5786,8 +5786,8 @@
; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl)
@@ -5816,8 +5816,8 @@
; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl)
@@ -5846,8 +5846,8 @@
; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl)
@@ -5876,8 +5876,8 @@
; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl)
@@ -5888,12 +5888,12 @@
define @test_vloxseg3_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -5908,8 +5908,8 @@
; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl)
@@ -5920,12 +5920,12 @@
define @test_vloxseg3_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -5940,8 +5940,8 @@
; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl)
@@ -5952,12 +5952,12 @@
define @test_vloxseg3_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -5972,8 +5972,8 @@
; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl)
@@ -5984,13 +5984,13 @@
define @test_vloxseg4_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6005,8 +6005,8 @@
; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl)
@@ -6017,13 +6017,13 @@
define @test_vloxseg4_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6038,8 +6038,8 @@
; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl)
@@ -6050,13 +6050,13 @@
define @test_vloxseg4_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6071,8 +6071,8 @@
; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl)
@@ -6083,14 +6083,14 @@
define @test_vloxseg5_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6105,8 +6105,8 @@
; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl)
@@ -6117,14 +6117,14 @@
define @test_vloxseg5_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6139,8 +6139,8 @@
; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl)
@@ -6151,14 +6151,14 @@
define @test_vloxseg5_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6173,8 +6173,8 @@
; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl)
@@ -6185,15 +6185,15 @@
define @test_vloxseg6_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6208,8 +6208,8 @@
; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl)
@@ -6220,15 +6220,15 @@
define @test_vloxseg6_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6243,8 +6243,8 @@
; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl)
@@ -6255,15 +6255,15 @@
define @test_vloxseg6_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6278,8 +6278,8 @@
; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl)
@@ -6290,16 +6290,16 @@
define @test_vloxseg7_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6314,8 +6314,8 @@
; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl)
@@ -6326,16 +6326,16 @@
define @test_vloxseg7_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6350,8 +6350,8 @@
; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl)
@@ -6362,16 +6362,16 @@
define @test_vloxseg7_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6386,8 +6386,8 @@
; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl)
@@ -6423,8 +6423,8 @@
; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl)
@@ -6460,8 +6460,8 @@
; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl)
@@ -6497,8 +6497,8 @@
; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl)
@@ -6527,8 +6527,8 @@
; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl)
@@ -6557,8 +6557,8 @@
; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl)
@@ -6587,8 +6587,8 @@
; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl)
@@ -6599,12 +6599,12 @@
define @test_vloxseg3_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6619,8 +6619,8 @@
; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl)
@@ -6631,12 +6631,12 @@
define @test_vloxseg3_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6651,8 +6651,8 @@
; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl)
@@ -6663,12 +6663,12 @@
define @test_vloxseg3_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6683,8 +6683,8 @@
; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl)
@@ -6695,13 +6695,13 @@
define @test_vloxseg4_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6716,8 +6716,8 @@
; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl)
@@ -6728,13 +6728,13 @@
define @test_vloxseg4_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6749,8 +6749,8 @@
; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl)
@@ -6761,13 +6761,13 @@
define @test_vloxseg4_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6782,8 +6782,8 @@
; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl)
@@ -6794,14 +6794,14 @@
define @test_vloxseg5_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6816,8 +6816,8 @@
; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl)
@@ -6828,14 +6828,14 @@
define @test_vloxseg5_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6850,8 +6850,8 @@
; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl)
@@ -6862,14 +6862,14 @@
define @test_vloxseg5_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6884,8 +6884,8 @@
; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl)
@@ -6896,15 +6896,15 @@
define @test_vloxseg6_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6919,8 +6919,8 @@
; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl)
@@ -6931,15 +6931,15 @@
define @test_vloxseg6_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6954,8 +6954,8 @@
; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl)
@@ -6966,15 +6966,15 @@
define @test_vloxseg6_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6989,8 +6989,8 @@
; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl)
@@ -7001,16 +7001,16 @@
define @test_vloxseg7_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -7025,8 +7025,8 @@
; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl)
@@ -7037,16 +7037,16 @@
define @test_vloxseg7_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -7061,8 +7061,8 @@
; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl)
@@ -7073,16 +7073,16 @@
define @test_vloxseg7_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -7097,8 +7097,8 @@
; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl)
@@ -7134,8 +7134,8 @@
; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl)
@@ -7171,8 +7171,8 @@
; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl)
@@ -7208,8 +7208,8 @@
; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl)
@@ -7238,8 +7238,8 @@
; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl)
@@ -7268,8 +7268,8 @@
; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl)
@@ -7298,8 +7298,8 @@
; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl)
@@ -7310,12 +7310,12 @@
define @test_vloxseg3_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -7330,8 +7330,8 @@
; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl)
@@ -7342,12 +7342,12 @@
define @test_vloxseg3_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -7362,8 +7362,8 @@
; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl)
@@ -7374,12 +7374,12 @@
define @test_vloxseg3_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -7394,8 +7394,8 @@
; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl)
@@ -7427,8 +7427,8 @@
; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl)
@@ -7460,8 +7460,8 @@
; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl)
@@ -7493,8 +7493,8 @@
; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16(half* %base, %index, i32 %vl)
@@ -7523,8 +7523,8 @@
; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8(half* %base, %index, i32 %vl)
@@ -7553,8 +7553,8 @@
; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v20
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32(half* %base, %index, i32 %vl)
@@ -7583,8 +7583,8 @@
; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16(double* %base, %index, i32 %vl)
@@ -7613,8 +7613,8 @@
; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8(double* %base, %index, i32 %vl)
@@ -7643,8 +7643,8 @@
; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32(double* %base, %index, i32 %vl)
@@ -7673,8 +7673,8 @@
; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8(double* %base, %index, i32 %vl)
@@ -7703,8 +7703,8 @@
; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32(double* %base, %index, i32 %vl)
@@ -7733,8 +7733,8 @@
; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16(double* %base, %index, i32 %vl)
@@ -7763,8 +7763,8 @@
; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8(double* %base, %index, i32 %vl)
@@ -7775,12 +7775,12 @@
define @test_vloxseg3_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -7795,8 +7795,8 @@
; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32(double* %base, %index, i32 %vl)
@@ -7807,12 +7807,12 @@
define @test_vloxseg3_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -7827,8 +7827,8 @@
; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16(double* %base, %index, i32 %vl)
@@ -7839,12 +7839,12 @@
define @test_vloxseg3_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -7859,8 +7859,8 @@
; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8(double* %base, %index, i32 %vl)
@@ -7871,13 +7871,13 @@
define @test_vloxseg4_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -7892,8 +7892,8 @@
; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32(double* %base, %index, i32 %vl)
@@ -7904,13 +7904,13 @@
define @test_vloxseg4_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -7925,8 +7925,8 @@
; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16(double* %base, %index, i32 %vl)
@@ -7937,13 +7937,13 @@
define @test_vloxseg4_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -7958,8 +7958,8 @@
; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8(double* %base, %index, i32 %vl)
@@ -7970,14 +7970,14 @@
define @test_vloxseg5_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -7992,8 +7992,8 @@
; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32(double* %base, %index, i32 %vl)
@@ -8004,14 +8004,14 @@
define @test_vloxseg5_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -8026,8 +8026,8 @@
; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16(double* %base, %index, i32 %vl)
@@ -8038,14 +8038,14 @@
define @test_vloxseg5_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -8060,8 +8060,8 @@
; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8(double* %base, %index, i32 %vl)
@@ -8072,15 +8072,15 @@
define @test_vloxseg6_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -8095,8 +8095,8 @@
; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32(double* %base, %index, i32 %vl)
@@ -8107,15 +8107,15 @@
define @test_vloxseg6_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -8130,8 +8130,8 @@
; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16(double* %base, %index, i32 %vl)
@@ -8142,15 +8142,15 @@
define @test_vloxseg6_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -8165,8 +8165,8 @@
; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8(double* %base, %index, i32 %vl)
@@ -8177,16 +8177,16 @@
define @test_vloxseg7_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -8201,8 +8201,8 @@
; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32(double* %base, %index, i32 %vl)
@@ -8213,16 +8213,16 @@
define @test_vloxseg7_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -8237,8 +8237,8 @@
; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16(double* %base, %index, i32 %vl)
@@ -8249,16 +8249,16 @@
define @test_vloxseg7_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -8273,8 +8273,8 @@
; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8(double* %base, %index, i32 %vl)
@@ -8310,8 +8310,8 @@
; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32(double* %base, %index, i32 %vl)
@@ -8347,8 +8347,8 @@
; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16(double* %base, %index, i32 %vl)
@@ -8384,8 +8384,8 @@
; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta,
mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8414,8 +8414,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8444,8 +8444,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8474,8 +8474,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8486,12 +8486,12 @@ define @test_vloxseg3_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8506,8 +8506,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8518,12 +8518,12 @@ define @test_vloxseg3_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8538,8 +8538,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8550,12 +8550,12 @@ define @test_vloxseg3_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8570,8 +8570,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8582,13 +8582,13 @@ define @test_vloxseg4_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8603,8 +8603,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8615,13 +8615,13 @@ define @test_vloxseg4_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8636,8 +8636,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8648,13 +8648,13 @@ define @test_vloxseg4_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8669,8 +8669,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8681,14 +8681,14 @@ define @test_vloxseg5_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8703,8 +8703,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8715,14 +8715,14 @@ define @test_vloxseg5_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8737,8 
+8737,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8749,14 +8749,14 @@ define @test_vloxseg5_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8771,8 +8771,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8783,15 +8783,15 @@ define @test_vloxseg6_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8806,8 +8806,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8818,15 +8818,15 @@ define @test_vloxseg6_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; 
CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8841,8 +8841,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8853,15 +8853,15 @@ define @test_vloxseg6_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8876,8 +8876,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8888,16 +8888,16 @@ define @test_vloxseg7_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8912,8 +8912,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8(float* 
%base, %index, i32 %vl) @@ -8924,16 +8924,16 @@ define @test_vloxseg7_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8948,8 +8948,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8960,16 +8960,16 @@ define @test_vloxseg7_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8984,8 +8984,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -9021,8 +9021,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -9058,8 +9058,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -9095,8 +9095,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9125,8 +9125,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9155,8 +9155,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9185,8 +9185,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9197,12 +9197,12 @@ define @test_vloxseg3_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9217,8 +9217,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9229,12 +9229,12 @@ define @test_vloxseg3_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, 
%index, %mask, i32 %vl, i32 1) @@ -9249,8 +9249,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9261,12 +9261,12 @@ define @test_vloxseg3_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9281,8 +9281,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9293,13 +9293,13 @@ define @test_vloxseg4_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9314,8 +9314,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9326,13 +9326,13 @@ define @test_vloxseg4_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, 
%mask, i32 %vl, i32 1) @@ -9347,8 +9347,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9359,13 +9359,13 @@ define @test_vloxseg4_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9380,8 +9380,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9392,14 +9392,14 @@ define @test_vloxseg5_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9414,8 +9414,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9426,14 +9426,14 @@ define @test_vloxseg5_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: 
vloxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9448,8 +9448,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9460,14 +9460,14 @@ define @test_vloxseg5_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9482,8 +9482,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9494,15 +9494,15 @@ define @test_vloxseg6_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9517,8 +9517,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9529,15 +9529,15 @@ define @test_vloxseg6_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: 
vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9552,8 +9552,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9564,15 +9564,15 @@ define @test_vloxseg6_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9587,8 +9587,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9599,16 +9599,16 @@ define @test_vloxseg7_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9623,8 +9623,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; 
CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9635,16 +9635,16 @@ define @test_vloxseg7_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9659,8 +9659,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9671,16 +9671,16 @@ define @test_vloxseg7_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9695,8 +9695,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9732,8 +9732,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9769,8 +9769,8 @@ ; CHECK-LABEL: 
test_vloxseg8_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9806,8 +9806,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -9836,8 +9836,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) @@ -9866,8 +9866,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -9896,8 +9896,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -9908,12 +9908,12 @@ define @test_vloxseg3_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9928,8 +9928,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) @@ -9940,12 +9940,12 @@ define @test_vloxseg3_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: 
vloxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9960,8 +9960,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -9972,12 +9972,12 @@ define @test_vloxseg3_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9992,8 +9992,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -10004,13 +10004,13 @@ define @test_vloxseg4_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10025,8 +10025,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) @@ -10037,13 +10037,13 @@ define @test_vloxseg4_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: 
vloxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10058,8 +10058,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -10070,13 +10070,13 @@ define @test_vloxseg4_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10091,8 +10091,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -10103,14 +10103,14 @@ define @test_vloxseg5_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10125,8 +10125,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) @@ -10137,14 +10137,14 @@ define @test_vloxseg5_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; 
CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10159,8 +10159,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -10171,14 +10171,14 @@ define @test_vloxseg5_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10193,8 +10193,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -10205,15 +10205,15 @@ define @test_vloxseg6_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10228,8 +10228,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32(float* %base, 
%index, i32 %vl) @@ -10240,15 +10240,15 @@ define @test_vloxseg6_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10263,8 +10263,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -10275,15 +10275,15 @@ define @test_vloxseg6_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10298,8 +10298,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -10310,16 +10310,16 @@ define @test_vloxseg7_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, 
v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10334,8 +10334,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) @@ -10346,16 +10346,16 @@ define @test_vloxseg7_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10370,8 +10370,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -10382,16 +10382,16 @@ define @test_vloxseg7_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10406,8 +10406,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -10443,8 +10443,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) @@ -10480,8 +10480,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -10517,8 +10517,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) @@ -10547,8 +10547,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) @@ -10577,8 +10577,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) @@ -10607,8 +10607,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) @@ -10619,12 +10619,12 @@ define @test_vloxseg3_mask_nxv8f16_nxv8i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -10639,8 +10639,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) @@ -10651,12 +10651,12 @@ define @test_vloxseg3_mask_nxv8f16_nxv8i8( %val, half* %base, 
%index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -10671,8 +10671,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) @@ -10702,8 +10702,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) @@ -10735,8 +10735,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) @@ -10768,8 +10768,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) @@ -10801,8 +10801,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16(float* %base, %index, i32 %vl) @@ -10831,8 +10831,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8(float* %base, %index, i32 %vl) @@ -10861,8 +10861,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32(float* %base, %index, i32 %vl) @@ -10891,8 +10891,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i32: ; CHECK: 
# %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) @@ -10921,8 +10921,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) @@ -10951,8 +10951,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) @@ -10981,8 +10981,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) @@ -10993,12 +10993,12 @@ define @test_vloxseg3_mask_nxv2f64_nxv2i32( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -11013,8 +11013,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) @@ -11025,12 +11025,12 @@ define @test_vloxseg3_mask_nxv2f64_nxv2i8( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -11045,8 +11045,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, 
e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) @@ -11057,12 +11057,12 @@ define @test_vloxseg3_mask_nxv2f64_nxv2i16( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -11077,8 +11077,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) @@ -11110,8 +11110,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) @@ -11143,8 +11143,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) @@ -11176,8 +11176,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11206,8 +11206,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11236,8 +11236,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11266,8 +11266,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11278,12 +11278,12 @@ define @test_vloxseg3_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11298,8 +11298,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11310,12 +11310,12 @@ define @test_vloxseg3_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11330,8 +11330,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11361,8 +11361,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11373,13 +11373,13 @@ define @test_vloxseg4_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; 
CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11394,8 +11394,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11406,13 +11406,13 @@ define @test_vloxseg4_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11427,8 +11427,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11439,13 +11439,13 @@ define @test_vloxseg4_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11460,8 +11460,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11472,14 +11472,14 @@ define @test_vloxseg5_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, 
v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11494,8 +11494,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11506,14 +11506,14 @@ define @test_vloxseg5_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11528,8 +11528,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11540,14 +11540,14 @@ define @test_vloxseg5_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11562,8 +11562,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11574,15 +11574,15 @@ define @test_vloxseg6_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v 
v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11597,8 +11597,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11609,15 +11609,15 @@ define @test_vloxseg6_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11632,8 +11632,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11644,15 +11644,15 @@ define @test_vloxseg6_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11667,8 +11667,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, 
m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11679,16 +11679,16 @@ define @test_vloxseg7_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11703,8 +11703,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11715,16 +11715,16 @@ define @test_vloxseg7_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11739,8 +11739,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11751,16 +11751,16 @@ define @test_vloxseg7_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: 
vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11775,8 +11775,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11812,8 +11812,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11849,8 +11849,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11886,8 +11886,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) @@ -11916,8 +11916,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) @@ -11946,8 +11946,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) @@ -11976,8 +11976,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) @@ -11988,12 +11988,12 @@ define @test_vloxseg3_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: 
test_vloxseg3_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12008,8 +12008,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) @@ -12020,12 +12020,12 @@ define @test_vloxseg3_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12040,8 +12040,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) @@ -12052,12 +12052,12 @@ define @test_vloxseg3_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12072,8 +12072,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) @@ -12084,13 +12084,13 @@ define @test_vloxseg4_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; 
CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12105,8 +12105,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) @@ -12117,13 +12117,13 @@ define @test_vloxseg4_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12138,8 +12138,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) @@ -12150,13 +12150,13 @@ define @test_vloxseg4_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12171,8 +12171,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) @@ -12183,14 +12183,14 @@ define @test_vloxseg5_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i32: ; CHECK: # 
%bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -12205,8 +12205,8 @@
; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8(half* %base, %index, i32 %vl)
@@ -12217,14 +12217,14 @@
define @test_vloxseg5_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -12239,8 +12239,8 @@
; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16(half* %base, %index, i32 %vl)
@@ -12251,14 +12251,14 @@
define @test_vloxseg5_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -12273,8 +12273,8 @@
; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32(half* %base, %index, i32 %vl)
@@ -12285,15 +12285,15 @@
define @test_vloxseg6_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -12308,8 +12308,8 @@
; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8(half* %base, %index, i32 %vl)
@@ -12320,15 +12320,15 @@
define @test_vloxseg6_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -12343,8 +12343,8 @@
; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16(half* %base, %index, i32 %vl)
@@ -12355,15 +12355,15 @@
define @test_vloxseg6_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -12378,8 +12378,8 @@
; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32(half* %base, %index, i32 %vl)
@@ -12390,16 +12390,16 @@
define @test_vloxseg7_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -12414,8 +12414,8 @@
; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8(half* %base, %index, i32 %vl)
@@ -12426,16 +12426,16 @@
define @test_vloxseg7_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -12450,8 +12450,8 @@
; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16(half* %base, %index, i32 %vl)
@@ -12462,16 +12462,16 @@
define @test_vloxseg7_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -12486,8 +12486,8 @@
; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32(half* %base, %index, i32 %vl)
@@ -12523,8 +12523,8 @@
; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8(half* %base, %index, i32 %vl)
@@ -12560,8 +12560,8 @@
; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16(half* %base, %index, i32 %vl)
@@ -12597,8 +12597,8 @@
; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16(float* %base, %index, i32 %vl)
@@ -12627,8 +12627,8 @@
; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8(float* %base, %index, i32 %vl)
@@ -12657,8 +12657,8 @@
; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32(float* %base, %index, i32 %vl)
@@ -12687,8 +12687,8 @@
; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16(float* %base, %index, i32 %vl)
@@ -12699,12 +12699,12 @@
define @test_vloxseg3_mask_nxv4f32_nxv4i16( %val, float* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1)
@@ -12719,8 +12719,8 @@
; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8(float* %base, %index, i32 %vl)
@@ -12731,12 +12731,12 @@
define @test_vloxseg3_mask_nxv4f32_nxv4i8( %val, float* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1)
@@ -12751,8 +12751,8 @@
; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32(float* %base, %index, i32 %vl)
@@ -12763,12 +12763,12 @@
define @test_vloxseg3_mask_nxv4f32_nxv4i32( %val, float* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1)
@@ -12783,8 +12783,8 @@
; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16(float* %base, %index, i32 %vl)
@@ -12816,8 +12816,8 @@
; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8(float* %base, %index, i32 %vl)
@@ -12849,8 +12849,8 @@
; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32(float* %base, %index, i32 %vl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll
@@ -9,8 +9,8 @@
; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
entry:
  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16* %base, %index, i64 %vl)
@@ -39,8 +39,8 @@
; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
entry:
  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(i16* %base, %index, i64 %vl)
@@ -69,8 +69,8 @@
; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v20
; CHECK-NEXT: ret
entry:
  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(i16* %base, %index, i64 %vl)
@@ -99,8 +99,8 @@
; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl)
@@ -129,8 +129,8 @@
; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl)
@@ -159,8 +159,8 @@
; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl)
@@ -189,8 +189,8 @@
; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl)
@@ -219,8 +219,8 @@
; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl)
@@ -231,12 +231,12 @@
define @test_vloxseg3_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -251,8 +251,8 @@
; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl)
@@ -263,12 +263,12 @@
define @test_vloxseg3_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -283,8 +283,8 @@
; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl)
@@ -314,8 +314,8 @@
; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl)
@@ -326,12 +326,12 @@
define @test_vloxseg3_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -346,8 +346,8 @@
; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl)
@@ -379,8 +379,8 @@
; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl)
@@ -412,8 +412,8 @@
; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl)
@@ -445,8 +445,8 @@
; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl)
@@ -478,8 +478,8 @@
; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl)
@@ -508,8 +508,8 @@
; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl)
@@ -538,8 +538,8 @@
; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v18
; CHECK-NEXT: ret
entry:
  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl)
@@ -568,8 +568,8 @@
; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl)
@@ -599,8 +599,8 @@
; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl)
@@ -611,12 +611,12 @@
define @test_vloxseg3_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -631,8 +631,8 @@
; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg3ei32.v v16, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v18
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl)
@@ -662,8 +662,8 @@
; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl)
@@ -695,8 +695,8 @@
; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl)
@@ -728,8 +728,8 @@
; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v18
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl)
@@ -760,8 +760,8 @@
; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl)
@@ -790,8 +790,8 @@
; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl)
@@ -820,8 +820,8 @@
; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl)
@@ -850,8 +850,8 @@
; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl)
@@ -880,8 +880,8 @@
; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl)
@@ -892,12 +892,12 @@
define @test_vloxseg3_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -912,8 +912,8 @@
; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl)
@@ -924,12 +924,12 @@
define @test_vloxseg3_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -944,8 +944,8 @@
; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl)
@@ -956,12 +956,12 @@
define @test_vloxseg3_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -976,8 +976,8 @@
; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl)
@@ -988,12 +988,12 @@
define @test_vloxseg3_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -1008,8 +1008,8 @@
; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl)
@@ -1020,13 +1020,13 @@
define @test_vloxseg4_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -1041,8 +1041,8 @@
; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl)
@@ -1053,13 +1053,13 @@
define @test_vloxseg4_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -1074,8 +1074,8 @@
; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl)
@@ -1086,13 +1086,13 @@
define @test_vloxseg4_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -1107,8 +1107,8 @@
; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl)
@@ -1119,13 +1119,13 @@
define @test_vloxseg4_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -1140,8 +1140,8 @@
; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl)
@@ -1152,14 +1152,14 @@
define @test_vloxseg5_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -1174,8 +1174,8 @@
; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl)
@@ -1186,14 +1186,14 @@
define @test_vloxseg5_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -1208,8 +1208,8 @@
; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl)
@@ -1220,14 +1220,14 @@
define @test_vloxseg5_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -1242,8 +1242,8 @@
; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl)
@@ -1254,14 +1254,14 @@
define @test_vloxseg5_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -1276,8 +1276,8 @@
; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl)
@@ -1288,15 +1288,15 @@
define @test_vloxseg6_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -1311,8 +1311,8 @@
; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl)
@@ -1323,15 +1323,15 @@
define @test_vloxseg6_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -1346,8 +1346,8 @@
; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl)
@@ -1358,15 +1358,15 @@
define @test_vloxseg6_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -1381,8 +1381,8 @@
; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl)
@@ -1393,15 +1393,15 @@
define @test_vloxseg6_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -1416,8 +1416,8 @@
; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl)
@@ -1428,16 +1428,16 @@
define @test_vloxseg7_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1452,8 +1452,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1464,16 +1464,16 @@ define @test_vloxseg7_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1488,8 +1488,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1500,16 +1500,16 @@ define @test_vloxseg7_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1524,8 +1524,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1536,16 +1536,16 @@ define @test_vloxseg7_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1560,8 +1560,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1597,8 +1597,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1634,8 +1634,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1671,8 +1671,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1708,8 +1708,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) 
@@ -1738,8 +1738,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) @@ -1768,8 +1768,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) @@ -1798,8 +1798,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) @@ -1828,8 +1828,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) @@ -1840,12 +1840,12 @@ define @test_vloxseg3_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1860,8 +1860,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) @@ -1872,12 +1872,12 @@ define @test_vloxseg3_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1892,8 +1892,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i16: ; CHECK: # %bb.0: 
# %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) @@ -1904,12 +1904,12 @@ define @test_vloxseg3_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1924,8 +1924,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) @@ -1936,12 +1936,12 @@ define @test_vloxseg3_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1956,8 +1956,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) @@ -1968,13 +1968,13 @@ define @test_vloxseg4_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1989,8 +1989,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v 
v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl)
@@ -2001,13 +2001,13 @@
 define @test_vloxseg4_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2022,8 +2022,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl)
@@ -2034,13 +2034,13 @@
 define @test_vloxseg4_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2055,8 +2055,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl)
@@ -2067,13 +2067,13 @@
 define @test_vloxseg4_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2088,8 +2088,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero,
a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl)
@@ -2100,14 +2100,14 @@
 define @test_vloxseg5_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2122,8 +2122,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl)
@@ -2134,14 +2134,14 @@
 define @test_vloxseg5_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2156,8 +2156,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl)
@@ -2168,14 +2168,14 @@
 define @test_vloxseg5_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT:
ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2190,8 +2190,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl)
@@ -2202,14 +2202,14 @@
 define @test_vloxseg5_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2224,8 +2224,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl)
@@ -2236,15 +2236,15 @@
 define @test_vloxseg6_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2259,8 +2259,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl)
@@ -2271,15 +2271,15 @@
 define @test_vloxseg6_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT:
vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2294,8 +2294,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl)
@@ -2306,15 +2306,15 @@
 define @test_vloxseg6_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2329,8 +2329,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl)
@@ -2341,15 +2341,15 @@
 define @test_vloxseg6_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2364,8 +2364,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0
= tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl)
@@ -2376,16 +2376,16 @@
 define @test_vloxseg7_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2400,8 +2400,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl)
@@ -2412,16 +2412,16 @@
 define @test_vloxseg7_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2436,8 +2436,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl)
@@ -2448,16 +2448,16 @@
 define @test_vloxseg7_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT:
vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2472,8 +2472,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl)
@@ -2484,16 +2484,16 @@
 define @test_vloxseg7_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2508,8 +2508,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl)
@@ -2545,8 +2545,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl)
@@ -2582,8 +2582,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl)
@@ -2619,8 +2619,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl)
@@ -2656,8 +2656,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-;
CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl)
@@ -2686,8 +2686,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl)
@@ -2716,8 +2716,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v18
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl)
@@ -2746,8 +2746,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl)
@@ -2776,8 +2776,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl)
@@ -2788,12 +2788,12 @@
 define @test_vloxseg3_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -2808,8 +2808,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl)
@@ -2820,12 +2820,12 @@
 define @test_vloxseg3_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT:
vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -2840,8 +2840,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg3ei64.v v16, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v18
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl)
@@ -2871,8 +2871,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl)
@@ -2902,8 +2902,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl)
@@ -2935,8 +2935,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl)
@@ -2968,8 +2968,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v18
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl)
@@ -3000,8 +3000,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl)
@@ -3033,8 +3033,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl)
@@ -3063,8 +3063,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl)
@@ -3093,8 +3093,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8
-;
CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl)
@@ -3123,8 +3123,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl)
@@ -3153,8 +3153,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl)
@@ -3184,8 +3184,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl)
@@ -3196,12 +3196,12 @@
 define @test_vloxseg3_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3216,8 +3216,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl)
@@ -3247,8 +3247,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl)
@@ -3259,12 +3259,12 @@
 define @test_vloxseg3_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,}
@llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3279,8 +3279,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl)
@@ -3291,13 +3291,13 @@
 define @test_vloxseg4_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3312,8 +3312,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl)
@@ -3324,13 +3324,13 @@
 define @test_vloxseg4_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3345,8 +3345,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl)
@@ -3377,8 +3377,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl)
@@ -3389,13 +3389,13 @@
 define @test_vloxseg4_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v
v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3410,8 +3410,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl)
@@ -3422,14 +3422,14 @@
 define @test_vloxseg5_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3444,8 +3444,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl)
@@ -3456,14 +3456,14 @@
 define @test_vloxseg5_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3478,8 +3478,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl)
@@ -3511,8 +3511,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ;
CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl)
@@ -3523,14 +3523,14 @@
 define @test_vloxseg5_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3545,8 +3545,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl)
@@ -3557,15 +3557,15 @@
 define @test_vloxseg6_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3580,8 +3580,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl)
@@ -3592,15 +3592,15 @@
 define @test_vloxseg6_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9,
v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3615,8 +3615,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl)
@@ -3627,15 +3627,15 @@
 define @test_vloxseg6_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vmv1r.v v8, v17
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3650,8 +3650,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl)
@@ -3662,15 +3662,15 @@
 define @test_vloxseg6_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3685,8 +3685,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl)
@@ -3697,16 +3697,16 @@
 define @test_vloxseg7_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-;
CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3721,8 +3721,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl)
@@ -3733,16 +3733,16 @@
 define @test_vloxseg7_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3757,8 +3757,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl)
@@ -3769,16 +3769,16 @@
 define @test_vloxseg7_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vmv1r.v v8, v17
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64( %val, %val, %val,
%val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3793,8 +3793,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl)
@@ -3805,16 +3805,16 @@
 define @test_vloxseg7_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3829,8 +3829,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl)
@@ -3866,8 +3866,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl)
@@ -3903,8 +3903,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl)
@@ -3940,8 +3940,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl)
@@ -3977,8 +3977,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl)
@@ -4007,8 +4007,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i32:
 ;
CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl)
@@ -4037,8 +4037,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl)
@@ -4067,8 +4067,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl)
@@ -4097,8 +4097,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl)
@@ -4109,12 +4109,12 @@
 define @test_vloxseg3_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -4129,8 +4129,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl)
@@ -4141,12 +4141,12 @@
 define @test_vloxseg3_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -4161,8 +4161,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-;
CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl)
@@ -4173,12 +4173,12 @@
 define @test_vloxseg3_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -4193,8 +4193,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl)
@@ -4205,12 +4205,12 @@
 define @test_vloxseg3_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -4225,8 +4225,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl)
@@ -4237,13 +4237,13 @@
 define @test_vloxseg4_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -4258,8 +4258,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT:
vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4270,13 +4270,13 @@ define @test_vloxseg4_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4291,8 +4291,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4303,13 +4303,13 @@ define @test_vloxseg4_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4324,8 +4324,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4336,13 +4336,13 @@ define @test_vloxseg4_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4357,8 +4357,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v0, 
(a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4369,14 +4369,14 @@ define @test_vloxseg5_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4391,8 +4391,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4403,14 +4403,14 @@ define @test_vloxseg5_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4425,8 +4425,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4437,14 +4437,14 @@ define @test_vloxseg5_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4459,8 +4459,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4471,14 +4471,14 @@ define @test_vloxseg5_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4493,8 +4493,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4505,15 +4505,15 @@ define @test_vloxseg6_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4528,8 +4528,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4540,15 +4540,15 @@ define @test_vloxseg6_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: 
vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4563,8 +4563,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4575,15 +4575,15 @@ define @test_vloxseg6_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4598,8 +4598,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4610,15 +4610,15 @@ define @test_vloxseg6_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4633,8 +4633,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4645,16 +4645,16 @@ define @test_vloxseg7_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4669,8 +4669,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4681,16 +4681,16 @@ define @test_vloxseg7_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4705,8 +4705,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4717,16 +4717,16 @@ define @test_vloxseg7_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, 
e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4741,8 +4741,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4753,20 +4753,20 @@ define @test_vloxseg7_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) + %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4777,8 +4777,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4814,8 +4814,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4851,8 +4851,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4888,8 +4888,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4925,8 +4925,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -4955,8 +4955,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -4985,8 +4985,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5015,8 +5015,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5045,8 +5045,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5057,12 +5057,12 @@ define @test_vloxseg3_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5077,8 +5077,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5089,12 +5089,12 @@ define @test_vloxseg3_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; 
CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5109,8 +5109,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5121,12 +5121,12 @@ define @test_vloxseg3_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5141,8 +5141,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5172,8 +5172,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5184,13 +5184,13 @@ define @test_vloxseg4_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5205,8 +5205,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5217,13 +5217,13 @@ define @test_vloxseg4_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5238,8 +5238,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5250,13 +5250,13 @@ define @test_vloxseg4_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5271,8 +5271,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5283,13 +5283,13 @@ define @test_vloxseg4_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5304,8 +5304,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v 
v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5316,14 +5316,14 @@ define @test_vloxseg5_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5338,8 +5338,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5350,14 +5350,14 @@ define @test_vloxseg5_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5372,8 +5372,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5384,14 +5384,14 @@ define @test_vloxseg5_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5406,8 +5406,8 @@ ; 
CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5418,14 +5418,14 @@ define @test_vloxseg5_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5440,8 +5440,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5452,15 +5452,15 @@ define @test_vloxseg6_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5475,8 +5475,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5487,15 +5487,15 @@ define @test_vloxseg6_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5510,8 +5510,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5522,15 +5522,15 @@ define @test_vloxseg6_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5545,8 +5545,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5557,15 +5557,15 @@ define @test_vloxseg6_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5580,8 +5580,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5592,16 +5592,16 @@ define @test_vloxseg7_mask_nxv2i32_nxv2i32( %val, i32* %base, 
%index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5616,8 +5616,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5628,16 +5628,16 @@ define @test_vloxseg7_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5652,8 +5652,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5664,16 +5664,16 @@ define @test_vloxseg7_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v 
v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5688,8 +5688,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5700,16 +5700,16 @@ define @test_vloxseg7_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5724,8 +5724,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5761,8 +5761,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5798,8 +5798,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5835,8 +5835,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5872,8 +5872,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -5902,8 +5902,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -5932,8 +5932,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -5962,8 +5962,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -5992,8 +5992,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6023,8 +6023,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6035,12 +6035,12 @@ define @test_vloxseg3_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6055,8 +6055,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6086,8 +6086,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32(i8* %base, %index, i64 
%vl) @@ -6117,8 +6117,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6129,13 +6129,13 @@ define @test_vloxseg4_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6150,8 +6150,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6162,13 +6162,13 @@ define @test_vloxseg4_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6183,8 +6183,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6215,8 +6215,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6247,8 +6247,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6259,14 +6259,14 @@ define @test_vloxseg5_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6281,8 +6281,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6293,14 +6293,14 @@ define @test_vloxseg5_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6315,8 +6315,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6348,8 +6348,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6381,8 +6381,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6393,15 +6393,15 @@ define @test_vloxseg6_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i16: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6416,8 +6416,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6428,15 +6428,15 @@ define @test_vloxseg6_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6451,8 +6451,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6485,8 +6485,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6497,15 +6497,15 @@ define @test_vloxseg6_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v12, v0.t -; 
CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6520,8 +6520,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6532,16 +6532,16 @@ define @test_vloxseg7_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6556,8 +6556,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6568,16 +6568,16 @@ define @test_vloxseg7_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6592,8 +6592,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6627,8 +6627,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i32: ; CHECK: 
# %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6639,16 +6639,16 @@ define @test_vloxseg7_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6663,8 +6663,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6700,8 +6700,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6737,8 +6737,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6773,8 +6773,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6810,8 +6810,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i32(i64* %base, %index, i64 %vl) @@ -6840,8 +6840,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; 
CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i8(i64* %base, %index, i64 %vl) @@ -6870,8 +6870,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i64(i64* %base, %index, i64 %vl) @@ -6900,8 +6900,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i16(i64* %base, %index, i64 %vl) @@ -6930,8 +6930,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -6960,8 +6960,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -6990,8 +6990,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7020,8 +7020,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7050,8 +7050,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7081,8 +7081,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7093,12 +7093,12 @@ define @test_vloxseg3_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i8: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7113,8 +7113,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7144,8 +7144,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7156,12 +7156,12 @@ define @test_vloxseg3_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7176,8 +7176,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7188,13 +7188,13 @@ define @test_vloxseg4_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7209,8 +7209,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7221,13 +7221,13 @@ define @test_vloxseg4_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7242,8 +7242,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7274,8 +7274,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7286,13 +7286,13 @@ define @test_vloxseg4_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7307,8 +7307,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7319,14 +7319,14 @@ define @test_vloxseg5_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; 
CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7341,8 +7341,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7353,14 +7353,14 @@ define @test_vloxseg5_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7375,8 +7375,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7408,8 +7408,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7420,14 +7420,14 @@ define @test_vloxseg5_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7442,8 +7442,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 
; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7454,15 +7454,15 @@ define @test_vloxseg6_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7477,8 +7477,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7489,15 +7489,15 @@ define @test_vloxseg6_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7512,8 +7512,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7524,15 +7524,15 @@ define @test_vloxseg6_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: 
vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7547,8 +7547,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7559,15 +7559,15 @@ define @test_vloxseg6_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7582,8 +7582,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7594,16 +7594,16 @@ define @test_vloxseg7_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7618,8 +7618,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7630,16 +7630,16 @@ define @test_vloxseg7_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7654,8 +7654,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7666,16 +7666,16 @@ define @test_vloxseg7_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7690,8 +7690,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7702,16 +7702,16 @@ define @test_vloxseg7_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16( %val, 
%val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7726,8 +7726,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7763,8 +7763,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7800,8 +7800,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7837,8 +7837,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7874,8 +7874,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -7904,8 +7904,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -7934,8 +7934,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -7964,8 +7964,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -7994,8 +7994,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 +; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8006,12 +8006,12 @@ define @test_vloxseg3_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8026,8 +8026,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8038,12 +8038,12 @@ define @test_vloxseg3_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8058,8 +8058,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8070,12 +8070,12 @@ define @test_vloxseg3_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8090,8 +8090,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8102,12 +8102,12 @@ define 
@test_vloxseg3_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8122,8 +8122,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8134,13 +8134,13 @@ define @test_vloxseg4_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8155,8 +8155,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8167,13 +8167,13 @@ define @test_vloxseg4_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8188,8 +8188,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8200,13 +8200,13 @@ define @test_vloxseg4_mask_nxv1i8_nxv1i16( %val, 
i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8221,8 +8221,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8233,13 +8233,13 @@ define @test_vloxseg4_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8254,8 +8254,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8266,14 +8266,14 @@ define @test_vloxseg5_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8288,8 +8288,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32(i8* 
%base, %index, i64 %vl) @@ -8300,14 +8300,14 @@ define @test_vloxseg5_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8322,8 +8322,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8334,14 +8334,14 @@ define @test_vloxseg5_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8356,8 +8356,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8368,14 +8368,14 @@ define @test_vloxseg5_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8390,8 +8390,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; 
CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8402,15 +8402,15 @@ define @test_vloxseg6_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8425,8 +8425,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8437,15 +8437,15 @@ define @test_vloxseg6_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8460,8 +8460,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8472,15 +8472,15 @@ define @test_vloxseg6_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: 
vloxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -8495,8 +8495,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl)
@@ -8507,15 +8507,15 @@
 define @test_vloxseg6_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -8530,8 +8530,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl)
@@ -8542,16 +8542,16 @@
 define @test_vloxseg7_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -8566,8 +8566,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl)
@@ -8578,16 +8578,16 @@
 define @test_vloxseg7_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -8602,8 +8602,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl)
@@ -8614,16 +8614,16 @@
 define @test_vloxseg7_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -8638,8 +8638,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl)
@@ -8650,16 +8650,16 @@
 define @test_vloxseg7_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -8674,8 +8674,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl)
@@ -8711,8 +8711,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl)
@@ -8748,8 +8748,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl)
@@ -8785,8 +8785,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl)
@@ -8822,8 +8822,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl)
@@ -8852,8 +8852,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl)
@@ -8882,8 +8882,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl)
@@ -8912,8 +8912,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl)
@@ -8942,8 +8942,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl)
@@ -8954,12 +8954,12 @@
 define @test_vloxseg3_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -8974,8 +8974,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl)
@@ -8986,12 +8986,12 @@
 define @test_vloxseg3_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9006,8 +9006,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl)
@@ -9018,12 +9018,12 @@
 define @test_vloxseg3_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9038,8 +9038,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl)
@@ -9069,8 +9069,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl)
@@ -9081,13 +9081,13 @@
 define @test_vloxseg4_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9102,8 +9102,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl)
@@ -9114,13 +9114,13 @@
 define @test_vloxseg4_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9135,8 +9135,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl)
@@ -9147,13 +9147,13 @@
 define @test_vloxseg4_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9168,8 +9168,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl)
@@ -9180,13 +9180,13 @@
 define @test_vloxseg4_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9201,8 +9201,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl)
@@ -9213,14 +9213,14 @@
 define @test_vloxseg5_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9235,8 +9235,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl)
@@ -9247,14 +9247,14 @@
 define @test_vloxseg5_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9269,8 +9269,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl)
@@ -9281,14 +9281,14 @@
 define @test_vloxseg5_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9303,8 +9303,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl)
@@ -9315,14 +9315,14 @@
 define @test_vloxseg5_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9337,8 +9337,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl)
@@ -9349,15 +9349,15 @@
 define @test_vloxseg6_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9372,8 +9372,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl)
@@ -9384,15 +9384,15 @@
 define @test_vloxseg6_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9407,8 +9407,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl)
@@ -9419,15 +9419,15 @@
 define @test_vloxseg6_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9442,8 +9442,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl)
@@ -9454,15 +9454,15 @@
 define @test_vloxseg6_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9477,8 +9477,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl)
@@ -9489,16 +9489,16 @@
 define @test_vloxseg7_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9513,8 +9513,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl)
@@ -9525,16 +9525,16 @@
 define @test_vloxseg7_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9549,8 +9549,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl)
@@ -9561,16 +9561,16 @@
 define @test_vloxseg7_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9585,8 +9585,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl)
@@ -9597,16 +9597,16 @@
 define @test_vloxseg7_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9621,8 +9621,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl)
@@ -9658,8 +9658,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl)
@@ -9695,8 +9695,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl)
@@ -9732,8 +9732,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl)
@@ -9769,8 +9769,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16(i32* %base, %index, i64 %vl)
@@ -9799,8 +9799,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8(i32* %base, %index, i64 %vl)
@@ -9829,8 +9829,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v20
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i64(i32* %base, %index, i64 %vl)
@@ -9859,8 +9859,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32(i32* %base, %index, i64 %vl)
@@ -9889,8 +9889,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv32i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v20
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16(i8* %base, %index, i64 %vl)
@@ -9919,8 +9919,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv32i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8(i8* %base, %index, i64 %vl)
@@ -9949,8 +9949,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl)
@@ -9979,8 +9979,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl)
@@ -10009,8 +10009,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl)
@@ -10039,8 +10039,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl)
@@ -10069,8 +10069,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl)
@@ -10081,12 +10081,12 @@
 define @test_vloxseg3_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -10101,8 +10101,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl)
@@ -10113,12 +10113,12 @@
 define @test_vloxseg3_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -10133,8 +10133,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl)
@@ -10145,12 +10145,12 @@
 define @test_vloxseg3_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -10165,8 +10165,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl)
@@ -10196,8 +10196,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl)
@@ -10208,13 +10208,13 @@
 define @test_vloxseg4_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -10229,8 +10229,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl)
@@ -10241,13 +10241,13 @@
 define @test_vloxseg4_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -10262,8 +10262,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl)
@@ -10274,13 +10274,13 @@
 define @test_vloxseg4_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -10295,8 +10295,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl)
@@ -10307,13 +10307,13 @@
 define @test_vloxseg4_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -10328,8 +10328,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl)
@@ -10340,14 +10340,14 @@
 define @test_vloxseg5_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -10362,8 +10362,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl)
@@ -10374,14 +10374,14 @@
 define @test_vloxseg5_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -10396,8 +10396,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl)
@@ -10408,14 +10408,14 @@
 define @test_vloxseg5_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -10430,8 +10430,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl)
@@ -10442,14 +10442,14 @@
 define @test_vloxseg5_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -10464,8 +10464,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl)
@@ -10476,15 +10476,15 @@
 define @test_vloxseg6_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -10499,8 +10499,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl)
@@ -10511,15 +10511,15 @@
 define @test_vloxseg6_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -10534,8 +10534,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl)
@@ -10546,15 +10546,15 @@
 define @test_vloxseg6_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -10569,8 +10569,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl)
@@ -10581,15 +10581,15 @@
 define @test_vloxseg6_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -10604,8 +10604,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl)
@@ -10616,16 +10616,16 @@
 define @test_vloxseg7_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -10640,8 +10640,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl)
@@ -10652,16 +10652,16 @@
 define @test_vloxseg7_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -10676,8 +10676,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl)
@@ -10688,16 +10688,16 @@
 define @test_vloxseg7_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -10712,8 +10712,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl)
@@ -10724,16 +10724,16 @@
 define @test_vloxseg7_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -10748,8 +10748,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl)
@@ -10785,8 +10785,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl)
@@ -10822,8 +10822,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl)
@@ -10859,8 +10859,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl)
@@ -10896,8 +10896,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl)
@@ -10926,8 +10926,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl)
@@ -10956,8 +10956,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl)
@@ -10986,8 +10986,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl)
@@ -11016,8 +11016,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl)
@@ -11028,12 +11028,12 @@
 define @test_vloxseg3_mask_nxv2i64_nxv2i32( %val, i64* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -11048,8 +11048,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl)
@@ -11060,12 +11060,12 @@
 define @test_vloxseg3_mask_nxv2i64_nxv2i8( %val, i64* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -11080,8
+11080,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) @@ -11092,12 +11092,12 @@ define @test_vloxseg3_mask_nxv2i64_nxv2i16( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11112,8 +11112,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) @@ -11124,12 +11124,12 @@ define @test_vloxseg3_mask_nxv2i64_nxv2i64( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11144,8 +11144,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) @@ -11177,8 +11177,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) @@ -11210,8 +11210,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) @@ -11243,8 +11243,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv2i64: ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) @@ -11276,8 +11276,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16(half* %base, %index, i64 %vl) @@ -11306,8 +11306,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8(half* %base, %index, i64 %vl) @@ -11336,8 +11336,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32(half* %base, %index, i64 %vl) @@ -11366,8 +11366,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32(double* %base, %index, i64 %vl) @@ -11396,8 +11396,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8(double* %base, %index, i64 %vl) @@ -11426,8 +11426,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i64(double* %base, %index, i64 %vl) @@ -11456,8 +11456,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16(double* %base, %index, i64 %vl) @@ -11486,8 +11486,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i64(double* 
%base, %index, i64 %vl) @@ -11516,8 +11516,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11546,8 +11546,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11576,8 +11576,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11606,8 +11606,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11618,12 +11618,12 @@ define @test_vloxseg3_mask_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11638,8 +11638,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11650,12 +11650,12 @@ define @test_vloxseg3_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11670,8 +11670,8 @@ ; 
CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11682,12 +11682,12 @@ define @test_vloxseg3_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11702,8 +11702,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11714,12 +11714,12 @@ define @test_vloxseg3_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11734,8 +11734,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11746,13 +11746,13 @@ define @test_vloxseg4_mask_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11767,8 +11767,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i32: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11779,13 +11779,13 @@ define @test_vloxseg4_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11800,8 +11800,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11812,13 +11812,13 @@ define @test_vloxseg4_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11833,8 +11833,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11845,13 +11845,13 @@ define @test_vloxseg4_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 
1) @@ -11866,8 +11866,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11878,14 +11878,14 @@ define @test_vloxseg5_mask_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11900,8 +11900,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11912,14 +11912,14 @@ define @test_vloxseg5_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11934,8 +11934,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11946,14 +11946,14 @@ define @test_vloxseg5_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: 
vloxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11968,8 +11968,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11980,14 +11980,14 @@ define @test_vloxseg5_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12002,8 +12002,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -12014,15 +12014,15 @@ define @test_vloxseg6_mask_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12037,8 +12037,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -12049,15 +12049,15 @@ define @test_vloxseg6_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12072,8 +12072,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -12084,15 +12084,15 @@ define @test_vloxseg6_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12107,8 +12107,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -12119,15 +12119,15 @@ define @test_vloxseg6_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12142,8 +12142,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -12154,16 +12154,16 @@ define @test_vloxseg7_mask_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12178,8 +12178,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -12190,16 +12190,16 @@ define @test_vloxseg7_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12214,8 +12214,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -12226,16 +12226,16 @@ define @test_vloxseg7_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: 
vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12250,8 +12250,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -12262,16 +12262,16 @@ define @test_vloxseg7_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12286,8 +12286,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -12323,8 +12323,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -12360,8 +12360,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -12397,8 +12397,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: 
vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -12434,8 +12434,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12464,8 +12464,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12494,8 +12494,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12524,8 +12524,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12554,8 +12554,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12566,12 +12566,12 @@ define @test_vloxseg3_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12586,8 +12586,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12598,12 +12598,12 @@ define @test_vloxseg3_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; 
CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12618,8 +12618,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12630,12 +12630,12 @@ define @test_vloxseg3_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12650,8 +12650,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12681,8 +12681,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12693,13 +12693,13 @@ define @test_vloxseg4_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12714,8 +12714,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: 
%0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12726,13 +12726,13 @@ define @test_vloxseg4_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12747,8 +12747,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12759,13 +12759,13 @@ define @test_vloxseg4_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12780,8 +12780,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12792,13 +12792,13 @@ define @test_vloxseg4_mask_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12813,8 +12813,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: 
vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12825,14 +12825,14 @@ define @test_vloxseg5_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12847,8 +12847,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12859,14 +12859,14 @@ define @test_vloxseg5_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12881,8 +12881,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12893,14 +12893,14 @@ define @test_vloxseg5_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, 
%val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12915,8 +12915,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12927,14 +12927,14 @@ define @test_vloxseg5_mask_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12949,8 +12949,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12961,15 +12961,15 @@ define @test_vloxseg6_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12984,8 +12984,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12996,15 +12996,15 @@ define @test_vloxseg6_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: 
vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13019,8 +13019,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -13031,15 +13031,15 @@ define @test_vloxseg6_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13054,8 +13054,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -13066,15 +13066,15 @@ define @test_vloxseg6_mask_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13089,8 +13089,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -13101,16 +13101,16 @@ define @test_vloxseg7_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13125,8 +13125,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -13137,16 +13137,16 @@ define @test_vloxseg7_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13161,8 +13161,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -13173,16 +13173,16 @@ define @test_vloxseg7_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli 
zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13197,8 +13197,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -13209,16 +13209,16 @@ define @test_vloxseg7_mask_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13233,8 +13233,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -13270,8 +13270,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -13307,8 +13307,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -13344,8 +13344,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -13381,8 +13381,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -13411,8 +13411,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -13441,8 +13441,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -13471,8 +13471,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -13501,8 +13501,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -13513,12 +13513,12 @@ define @test_vloxseg3_mask_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13533,8 +13533,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -13545,12 +13545,12 @@ define @test_vloxseg3_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: 
vloxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13565,8 +13565,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -13577,12 +13577,12 @@ define @test_vloxseg3_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13597,8 +13597,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -13609,12 +13609,12 @@ define @test_vloxseg3_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13629,8 +13629,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -13641,13 +13641,13 @@ define @test_vloxseg4_mask_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13662,8 +13662,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -13674,13 +13674,13 @@ define @test_vloxseg4_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13695,8 +13695,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -13707,13 +13707,13 @@ define @test_vloxseg4_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13728,8 +13728,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -13740,13 +13740,13 @@ define @test_vloxseg4_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; 
CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13761,8 +13761,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -13773,14 +13773,14 @@ define @test_vloxseg5_mask_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13795,8 +13795,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -13807,14 +13807,14 @@ define @test_vloxseg5_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13829,8 +13829,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -13841,14 +13841,14 @@ define @test_vloxseg5_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; 
CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13863,8 +13863,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -13875,14 +13875,14 @@ define @test_vloxseg5_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13897,8 +13897,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -13909,15 +13909,15 @@ define @test_vloxseg6_mask_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13932,8 +13932,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32(half* %base, %index, 
i64 %vl) @@ -13944,15 +13944,15 @@ define @test_vloxseg6_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13967,8 +13967,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -13979,15 +13979,15 @@ define @test_vloxseg6_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14002,8 +14002,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -14014,15 +14014,15 @@ define @test_vloxseg6_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14037,8 +14037,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -14049,16 +14049,16 @@ define @test_vloxseg7_mask_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14073,8 +14073,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -14085,16 +14085,16 @@ define @test_vloxseg7_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14109,8 +14109,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -14121,16 +14121,16 @@ define @test_vloxseg7_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i16: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14145,8 +14145,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -14157,16 +14157,16 @@ define @test_vloxseg7_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14181,8 +14181,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -14218,8 +14218,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -14255,8 +14255,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -14292,8 +14292,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i8: ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -14329,8 +14329,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -14359,8 +14359,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -14389,8 +14389,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -14419,8 +14419,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -14449,8 +14449,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -14461,12 +14461,12 @@ define @test_vloxseg3_mask_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14481,8 +14481,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -14493,12 +14493,12 @@ define 
@test_vloxseg3_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14513,8 +14513,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -14525,12 +14525,12 @@ define @test_vloxseg3_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14545,8 +14545,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -14557,12 +14557,12 @@ define @test_vloxseg3_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14577,8 +14577,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -14589,13 +14589,13 @@ define @test_vloxseg4_mask_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: 
test_vloxseg4_mask_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14610,8 +14610,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -14622,13 +14622,13 @@ define @test_vloxseg4_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14643,8 +14643,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -14655,13 +14655,13 @@ define @test_vloxseg4_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14676,8 +14676,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -14688,13 +14688,13 @@ define 
@test_vloxseg4_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14709,8 +14709,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -14721,14 +14721,14 @@ define @test_vloxseg5_mask_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14743,8 +14743,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -14755,14 +14755,14 @@ define @test_vloxseg5_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14777,8 +14777,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v 
v8, v1 +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -14789,14 +14789,14 @@ define @test_vloxseg5_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14811,8 +14811,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -14823,14 +14823,14 @@ define @test_vloxseg5_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14845,8 +14845,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -14857,15 +14857,15 @@ define @test_vloxseg6_mask_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret 
entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14880,8 +14880,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -14892,15 +14892,15 @@ define @test_vloxseg6_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14915,8 +14915,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -14927,15 +14927,15 @@ define @test_vloxseg6_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14950,8 +14950,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -14962,15 +14962,15 @@ define @test_vloxseg6_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; 
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -14985,8 +14985,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg7ei64.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg7ei64.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i64(float* %base, %index, i64 %vl)
@@ -14997,16 +14997,16 @@
 define @test_vloxseg7_mask_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg7ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg7ei64.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -15021,8 +15021,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32(float* %base, %index, i64 %vl)
@@ -15033,16 +15033,16 @@
 define @test_vloxseg7_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -15057,8 +15057,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16(float* %base, %index, i64 %vl)
@@ -15069,16 +15069,16 @@
 define @test_vloxseg7_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -15093,8 +15093,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8(float* %base, %index, i64 %vl)
@@ -15105,16 +15105,16 @@
 define @test_vloxseg7_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -15129,8 +15129,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg8ei64.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg8ei64.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i64(float* %base, %index, i64 %vl)
@@ -15166,8 +15166,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32(float* %base, %index, i64 %vl)
@@ -15203,8 +15203,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16(float* %base, %index, i64 %vl)
@@ -15240,8 +15240,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8(float* %base, %index, i64 %vl)
@@ -15277,8 +15277,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg2ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16(half* %base, %index, i64 %vl)
@@ -15307,8 +15307,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg2ei8.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8(half* %base, %index, i64 %vl)
@@ -15337,8 +15337,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg2ei64.v v16, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v18
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i64(half* %base, %index, i64 %vl)
@@ -15367,8 +15367,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg2ei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32(half* %base, %index, i64 %vl)
@@ -15397,8 +15397,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16(half* %base, %index, i64 %vl)
@@ -15409,12 +15409,12 @@
 define @test_vloxseg3_mask_nxv8f16_nxv8i16( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v12
+; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vloxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -15429,8 +15429,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8(half* %base, %index, i64 %vl)
@@ -15441,12 +15441,12 @@
 define @test_vloxseg3_mask_nxv8f16_nxv8i8( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v12
+; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vloxseg3ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -15461,8 +15461,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei64.v v16, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v18
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i64(half* %base, %index, i64 %vl)
@@ -15492,8 +15492,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32(half* %base, %index, i64 %vl)
@@ -15523,8 +15523,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16(half* %base, %index, i64 %vl)
@@ -15556,8 +15556,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8(half* %base, %index, i64 %vl)
@@ -15589,8 +15589,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg4ei64.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei64.v v16, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v18
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i64(half* %base, %index, i64 %vl)
@@ -15621,8 +15621,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32(half* %base, %index, i64 %vl)
@@ -15654,8 +15654,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8
+; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16(float* %base, %index, i64 %vl)
@@ -15684,8 +15684,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vloxseg2ei8.v v12, (a0), v8
+; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8(float* %base, %index, i64 %vl)
@@ -15714,8 +15714,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vloxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vloxseg2ei64.v v16, (a0), v8
+; CHECK-NEXT:    vmv4r.v v8, v20
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i64(float* %base, %index, i64 %vl)
@@ -15744,8 +15744,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vloxseg2ei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32(float* %base, %index, i64 %vl)
@@ -15774,8 +15774,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg2ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32(double* %base, %index, i64 %vl)
@@ -15804,8 +15804,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg2ei8.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8(double* %base, %index, i64 %vl)
@@ -15834,8 +15834,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg2ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16(double* %base, %index, i64 %vl)
@@ -15864,8 +15864,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg2ei64.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i64(double* %base, %index, i64 %vl)
@@ -15894,8 +15894,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32(double* %base, %index, i64 %vl)
@@ -15906,12 +15906,12 @@
 define @test_vloxseg3_mask_nxv2f64_nxv2i32( %val, double* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v12
+; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vloxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1)
@@ -15926,8 +15926,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8(double* %base, %index, i64 %vl)
@@ -15938,12 +15938,12 @@
 define @test_vloxseg3_mask_nxv2f64_nxv2i8( %val, double* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v12
+; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vloxseg3ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1)
@@ -15958,8 +15958,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16(double* %base, %index, i64 %vl)
@@ -15970,12 +15970,12 @@
 define @test_vloxseg3_mask_nxv2f64_nxv2i16( %val, double* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v12
+; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vloxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1)
@@ -15990,8 +15990,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei64.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i64(double* %base, %index, i64 %vl)
@@ -16002,12 +16002,12 @@
 define @test_vloxseg3_mask_nxv2f64_nxv2i64( %val, double* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v12
+; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vloxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1)
@@ -16022,8 +16022,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32(double* %base, %index, i64 %vl)
@@ -16055,8 +16055,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8(double* %base, %index, i64 %vl)
@@ -16088,8 +16088,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16(double* %base, %index, i64 %vl)
@@ -16121,8 +16121,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg4ei64.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei64.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i64(double* %base, %index, i64 %vl)
@@ -16154,8 +16154,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg2ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32(half* %base, %index, i64 %vl)
@@ -16184,8 +16184,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8(half* %base, %index, i64 %vl)
@@ -16214,8 +16214,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg2ei64.v v12, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i64(half* %base, %index, i64 %vl)
@@ -16244,8 +16244,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16(half* %base, %index, i64 %vl)
@@ -16274,8 +16274,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32(half* %base, %index, i64 %vl)
@@ -16305,8 +16305,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8(half* %base, %index, i64 %vl)
@@ -16317,12 +16317,12 @@
 define @test_vloxseg3_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16337,8 +16337,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg3ei64.v v12, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i64(half* %base, %index, i64 %vl)
@@ -16368,8 +16368,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16(half* %base, %index, i64 %vl)
@@ -16380,12 +16380,12 @@
 define @test_vloxseg3_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16400,8 +16400,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32(half* %base, %index, i64 %vl)
@@ -16412,13 +16412,13 @@
 define @test_vloxseg4_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v12
+; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v15, v12
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16433,8 +16433,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8(half* %base, %index, i64 %vl)
@@ -16445,13 +16445,13 @@
 define @test_vloxseg4_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16466,8 +16466,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei64.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg4ei64.v v12, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i64(half* %base, %index, i64 %vl)
@@ -16498,8 +16498,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16(half* %base, %index, i64 %vl)
@@ -16510,13 +16510,13 @@
 define @test_vloxseg4_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16531,8 +16531,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32(half* %base, %index, i64 %vl)
@@ -16543,14 +16543,14 @@
 define @test_vloxseg5_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v12
+; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg5ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg5ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16565,8 +16565,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8(half* %base, %index, i64 %vl)
@@ -16577,14 +16577,14 @@
 define @test_vloxseg5_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16599,8 +16599,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg5ei64.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg5ei64.v v12, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i64(half* %base, %index, i64 %vl)
@@ -16632,8 +16632,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16(half* %base, %index, i64 %vl)
@@ -16644,14 +16644,14 @@
 define @test_vloxseg5_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16666,8 +16666,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32(half* %base, %index, i64 %vl)
@@ -16678,15 +16678,15 @@
 define @test_vloxseg6_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v12
+; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v17, v12
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg6ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg6ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16701,8 +16701,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8(half* %base, %index, i64 %vl)
@@ -16713,15 +16713,15 @@
 define @test_vloxseg6_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16736,8 +16736,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg6ei64.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg6ei64.v v12, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i64(half* %base, %index, i64 %vl)
@@ -16748,15 +16748,15 @@
 define @test_vloxseg6_mask_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg6ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg6ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v17
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16771,8 +16771,8 @@
 ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16(half* %base, %index, i64 %vl)
@@ -16783,15 +16783,15 @@
 define @test_vloxseg6_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16806,8 +16806,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32(half* %base, %index, i64 %vl)
@@ -16818,16 +16818,16 @@
 define @test_vloxseg7_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v12
+; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v18, v12
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg7ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg7ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16842,8 +16842,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8(half* %base, %index, i64 %vl)
@@ -16854,16 +16854,16 @@
 define @test_vloxseg7_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16878,8 +16878,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg7ei64.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg7ei64.v v12, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i64(half* %base, %index, i64 %vl)
@@ -16890,16 +16890,16 @@
 define @test_vloxseg7_mask_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg7ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg7ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v17
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16914,8 +16914,8 @@
 ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16(half* %base, %index, i64 %vl)
@@ -16926,16 +16926,16 @@
 define @test_vloxseg7_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16950,8 +16950,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32(half* %base, %index, i64 %vl)
@@ -16987,8 +16987,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8(half* %base, %index, i64 %vl)
@@ -17024,8 +17024,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg8ei64.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg8ei64.v v12, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i64(half* %base, %index, i64 %vl)
@@ -17061,8 +17061,8 @@
 ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16(half* %base, %index, i64 %vl)
@@ -17098,8 +17098,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32(half* %base, %index, i64 %vl)
@@ -17128,8 +17128,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8(half* %base, %index, i64 %vl)
@@ -17158,8 +17158,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16(half* %base, %index, i64 %vl)
@@ -17188,8 +17188,8 @@
 ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg2ei64.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i64(half* %base, %index, i64 %vl)
@@ -17218,8 +17218,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg3ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32(half* %base, %index, i64 %vl)
@@ -17230,12 +17230,12 @@
 define @test_vloxseg3_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17250,8 +17250,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8(half* %base, %index, i64 %vl)
@@ -17262,12 +17262,12 @@
 define @test_vloxseg3_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17282,8 +17282,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16(half* %base, %index, i64 %vl)
@@ -17294,12 +17294,12 @@
 define @test_vloxseg3_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17314,8 +17314,8 @@
 ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg3ei64.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i64(half* %base, %index, i64 %vl)
@@ -17345,8 +17345,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32(half* %base, %index, i64 %vl)
@@ -17357,13 +17357,13 @@
 define @test_vloxseg4_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17378,8 +17378,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8(half* %base, %index, i64 %vl)
@@ -17390,13 +17390,13 @@
 define @test_vloxseg4_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17411,8 +17411,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16(half* %base, %index, i64 %vl)
@@ -17423,13 +17423,13 @@
 define @test_vloxseg4_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17444,8 +17444,8 @@
 ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg4ei64.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg4ei64.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i64(half* %base, %index, i64 %vl)
@@ -17456,13 +17456,13 @@
 define @test_vloxseg4_mask_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v12
+; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v15, v12
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg4ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg4ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17477,8 +17477,8 @@
 ; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vloxseg5ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32(half* %base, %index, i64 %vl)
@@ -17489,14 +17489,14 @@
 define @test_vloxseg5_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17511,8 +17511,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) @@ -17523,14 +17523,14 @@ define @test_vloxseg5_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17545,8 +17545,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) @@ -17557,14 +17557,14 @@ define @test_vloxseg5_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17579,8 +17579,8 @@ ; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: 
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) @@ -17591,14 +17591,14 @@ define @test_vloxseg5_mask_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17613,8 +17613,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) @@ -17625,15 +17625,15 @@ define @test_vloxseg6_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17648,8 +17648,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) @@ -17660,15 +17660,15 @@ define @test_vloxseg6_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17683,8 +17683,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) @@ -17695,15 +17695,15 @@ define @test_vloxseg6_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17718,8 +17718,8 @@ ; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) @@ -17730,15 +17730,15 @@ define @test_vloxseg6_mask_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17753,8 +17753,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) @@ -17765,16 +17765,16 @@ define @test_vloxseg7_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; 
CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17789,8 +17789,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) @@ -17801,16 +17801,16 @@ define @test_vloxseg7_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17825,8 +17825,8 @@ ; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) @@ -17837,16 +17837,16 @@ define @test_vloxseg7_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17861,8 +17861,8 
@@ ; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) @@ -17873,16 +17873,16 @@ define @test_vloxseg7_mask_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17897,8 +17897,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) @@ -17934,8 +17934,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) @@ -17971,8 +17971,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) @@ -18008,8 +18008,8 @@ ; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) @@ -18045,8 +18045,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) @@ -18075,8 +18075,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) @@ -18105,8 +18105,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) @@ -18135,8 +18135,8 @@ ; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) @@ -18165,8 +18165,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) @@ -18177,12 +18177,12 @@ define @test_vloxseg3_mask_nxv4f32_nxv4i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18197,8 +18197,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) @@ -18209,12 +18209,12 @@ define @test_vloxseg3_mask_nxv4f32_nxv4i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18229,8 +18229,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; 
CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) @@ -18260,8 +18260,8 @@ ; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) @@ -18272,12 +18272,12 @@ define @test_vloxseg3_mask_nxv4f32_nxv4i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18292,8 +18292,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) @@ -18325,8 +18325,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) @@ -18358,8 +18358,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) @@ -18391,8 +18391,8 @@ ; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll @@ -9,7 +9,7 @@ ; CHECK-LABEL: test_vlseg2ff_dead_value: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v0, (a0) +; CHECK-NEXT: vlseg2e16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret @@ -68,7 +68,7 @@ ; 
CHECK-LABEL: test_vlseg2ff_dead_all: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v0, (a0) +; CHECK-NEXT: vlseg2e16ff.v v8, (a0) ; CHECK-NEXT: ret entry: tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i32 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll @@ -9,7 +9,7 @@ ; CHECK-LABEL: test_vlseg2ff_dead_value: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v0, (a0) +; CHECK-NEXT: vlseg2e16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret @@ -68,7 +68,7 @@ ; CHECK-LABEL: test_vlseg2ff_dead_all: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vlseg2e16ff.v v0, (a0) +; CHECK-NEXT: vlseg2e16ff.v v8, (a0) ; CHECK-NEXT: ret entry: tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll @@ -9,8 +9,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16* %base, %index, i32 %vl) @@ -39,8 +39,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(i16* %base, %index, i32 %vl) @@ -69,8 +69,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(i16* %base, %index, i32 %vl) @@ -99,8 +99,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -129,8 +129,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -159,8 +159,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, 
v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -189,8 +189,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -201,12 +201,12 @@ define @test_vluxseg3_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -221,8 +221,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -233,12 +233,12 @@ define @test_vluxseg3_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -253,8 +253,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -265,12 +265,12 @@ define @test_vluxseg3_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -285,8 +285,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -297,13 +297,13 @@ define @test_vluxseg4_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -318,8 +318,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -330,13 +330,13 @@ define @test_vluxseg4_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -351,8 +351,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -363,13 +363,13 @@ define @test_vluxseg4_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -384,8 +384,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i8: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -396,14 +396,14 @@ define @test_vluxseg5_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -418,8 +418,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -430,14 +430,14 @@ define @test_vluxseg5_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -452,8 +452,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -464,14 +464,14 @@ define @test_vluxseg5_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: 
ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -486,8 +486,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -498,15 +498,15 @@ define @test_vluxseg6_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -521,8 +521,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -533,15 +533,15 @@ define @test_vluxseg6_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -556,8 +556,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -568,15 +568,15 @@ define @test_vluxseg6_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: 
vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -591,8 +591,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -603,16 +603,16 @@ define @test_vluxseg7_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -627,8 +627,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -639,16 +639,16 @@ define @test_vluxseg7_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -663,8 +663,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v0, (a0), 
v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -675,16 +675,16 @@ define @test_vluxseg7_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -699,8 +699,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -736,8 +736,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -773,8 +773,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -810,8 +810,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) @@ -840,8 +840,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) @@ -870,8 +870,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vluxseg2.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) @@ -900,8 +900,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) @@ -931,8 +931,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) @@ -943,12 +943,12 @@ define @test_vluxseg3_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -963,8 +963,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) @@ -994,8 +994,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) @@ -1027,8 +1027,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) @@ -1060,8 +1060,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) @@ -1092,8 +1092,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vluxseg2.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl)
@@ -1122,8 +1122,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl)
@@ -1152,8 +1152,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl)
@@ -1182,8 +1182,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg3ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl)
@@ -1194,12 +1194,12 @@
 define @test_vluxseg3_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1214,8 +1214,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl)
@@ -1226,12 +1226,12 @@
 define @test_vluxseg3_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1246,8 +1246,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl)
@@ -1258,12 +1258,12 @@
 define @test_vluxseg3_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1278,8 +1278,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl)
@@ -1290,13 +1290,13 @@
 define @test_vluxseg4_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1311,8 +1311,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl)
@@ -1323,13 +1323,13 @@
 define @test_vluxseg4_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1344,8 +1344,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl)
@@ -1356,13 +1356,13 @@
 define @test_vluxseg4_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1377,8 +1377,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg5ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl)
@@ -1389,14 +1389,14 @@
 define @test_vluxseg5_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1411,8 +1411,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl)
@@ -1423,14 +1423,14 @@
 define @test_vluxseg5_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1445,8 +1445,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl)
@@ -1457,14 +1457,14 @@
 define @test_vluxseg5_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1479,8 +1479,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl)
@@ -1491,15 +1491,15 @@
 define @test_vluxseg6_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1514,8 +1514,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl)
@@ -1526,15 +1526,15 @@
 define @test_vluxseg6_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1549,8 +1549,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl)
@@ -1561,15 +1561,15 @@
 define @test_vluxseg6_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1584,8 +1584,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl)
@@ -1596,16 +1596,16 @@
 define @test_vluxseg7_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1620,8 +1620,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl)
@@ -1632,16 +1632,16 @@
 define @test_vluxseg7_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1656,8 +1656,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl)
@@ -1668,16 +1668,16 @@
 define @test_vluxseg7_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -1692,8 +1692,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl)
@@ -1729,8 +1729,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl)
@@ -1766,8 +1766,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl)
@@ -1803,8 +1803,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl)
@@ -1833,8 +1833,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl)
@@ -1863,8 +1863,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg2ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl)
@@ -1893,8 +1893,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl)
@@ -1905,12 +1905,12 @@
 define @test_vluxseg3_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -1925,8 +1925,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl)
@@ -1937,12 +1937,12 @@
 define @test_vluxseg3_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -1957,8 +1957,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl)
@@ -1988,8 +1988,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl)
@@ -2000,13 +2000,13 @@
 define @test_vluxseg4_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -2021,8 +2021,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl)
@@ -2033,13 +2033,13 @@
 define @test_vluxseg4_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -2054,8 +2054,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl)
@@ -2066,13 +2066,13 @@
 define @test_vluxseg4_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v12
+; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v15, v12
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -2087,8 +2087,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl)
@@ -2099,14 +2099,14 @@
 define @test_vluxseg5_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -2121,8 +2121,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl)
@@ -2133,14 +2133,14 @@
 define @test_vluxseg5_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -2155,8 +2155,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl)
@@ -2167,14 +2167,14 @@
 define @test_vluxseg5_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v12
+; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg5ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -2189,8 +2189,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl)
@@ -2201,15 +2201,15 @@
 define @test_vluxseg6_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -2224,8 +2224,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl)
@@ -2236,15 +2236,15 @@
 define @test_vluxseg6_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -2259,8 +2259,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl)
@@ -2271,19 +2271,19 @@
 define @test_vluxseg6_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
-  %1 = extractvalue {,,,,,} %0, 1
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v12
+; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT:    vluxseg6ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
+  %1 = extractvalue {,,,,,} %0, 1
   ret %1
 }
@@ -2294,8 +2294,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl)
@@ -2306,16 +2306,16 @@
 define @test_vluxseg7_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -2330,8 +2330,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl)
@@ -2342,16 +2342,16 @@
 define @test_vluxseg7_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -2366,8 +2366,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl)
@@ -2378,16 +2378,16 @@
 define @test_vluxseg7_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v12
+; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v18, v12
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg7ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -2402,8 +2402,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl)
@@ -2439,8 +2439,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl)
@@ -2476,8 +2476,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl)
@@ -2513,8 +2513,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl)
@@ -2543,8 +2543,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl)
@@ -2573,8 +2573,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl)
@@ -2603,8 +2603,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl)
@@ -2615,12 +2615,12 @@
 define @test_vluxseg3_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -2635,8 +2635,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg3ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl)
@@ -2647,12 +2647,12 @@
 define @test_vluxseg3_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -2667,8 +2667,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl)
@@ -2679,12 +2679,12 @@
 define @test_vluxseg3_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -2699,8 +2699,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl)
@@ -2711,13 +2711,13 @@
 define @test_vluxseg4_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -2732,8 +2732,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl)
@@ -2744,13 +2744,13 @@
 define @test_vluxseg4_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -2765,8 +2765,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl)
@@ -2777,13 +2777,13 @@
 define @test_vluxseg4_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -2798,8 +2798,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl)
@@ -2810,14 +2810,14 @@
 define @test_vluxseg5_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -2832,8 +2832,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg5ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl)
@@ -2844,14 +2844,14 @@
 define @test_vluxseg5_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -2866,8 +2866,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl)
@@ -2878,14 +2878,14 @@
 define @test_vluxseg5_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -2900,8 +2900,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl)
@@ -2912,15 +2912,15 @@
 define @test_vluxseg6_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -2935,8 +2935,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl)
@@ -2947,15 +2947,15 @@
 define @test_vluxseg6_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -2970,8 +2970,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl)
@@ -2982,15 +2982,15 @@
 define @test_vluxseg6_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -3005,8 +3005,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl)
@@ -3017,16 +3017,16 @@
 define @test_vluxseg7_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -3041,8 +3041,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl)
@@ -3053,16 +3053,16 @@
 define @test_vluxseg7_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -3077,8 +3077,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl)
@@ -3089,16 +3089,16 @@
 define @test_vluxseg7_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -3113,8 +3113,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl)
@@ -3150,8 +3150,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl)
@@ -3187,8 +3187,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl)
@@ -3224,8 +3224,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl)
@@ -3254,8 +3254,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei8.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl)
@@ -3284,8 +3284,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl)
@@ -3314,8 +3314,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl)
@@ -3326,12 +3326,12 @@
 define @test_vluxseg3_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v12
+; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -3346,8 +3346,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl)
@@ -3358,12 +3358,12 @@
 define @test_vluxseg3_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v12
+; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg3ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -3378,8 +3378,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vluxseg3ei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl)
@@ -3409,8 +3409,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl)
@@ -3442,8 +3442,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl)
@@ -3475,8 +3475,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl)
@@ -3508,8 +3508,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg2ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl)
@@ -3538,8 +3538,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl)
@@ -3568,8 +3568,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl)
@@ -3598,8 +3598,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl)
@@ -3629,8 +3629,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl)
@@ -3641,12 +3641,12 @@
 define @test_vluxseg3_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -3661,8 +3661,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg3ei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl)
@@ -3692,8 +3692,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl)
@@ -3704,13 +3704,13 @@
 define @test_vluxseg4_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v12
+; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v15, v12
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -3725,8 +3725,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl)
@@ -3737,13 +3737,13 @@
 define @test_vluxseg4_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -3758,8 +3758,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call
{,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3790,8 +3790,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3802,14 +3802,14 @@ define @test_vluxseg5_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3824,8 +3824,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3836,14 +3836,14 @@ define @test_vluxseg5_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3858,8 +3858,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3891,8 +3891,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3903,15 +3903,15 @@ define @test_vluxseg6_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3926,8 +3926,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3938,15 +3938,15 @@ define @test_vluxseg6_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3961,8 +3961,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3973,15 +3973,15 @@ define @test_vluxseg6_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3996,8 +3996,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -4008,16 +4008,16 @@ define @test_vluxseg7_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4032,8 +4032,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -4044,16 +4044,16 @@ define @test_vluxseg7_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4068,8 +4068,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -4080,16 +4080,16 @@ define @test_vluxseg7_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: 
vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4104,8 +4104,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -4141,8 +4141,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -4178,8 +4178,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -4215,8 +4215,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(i32* %base, %index, i32 %vl) @@ -4245,8 +4245,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(i32* %base, %index, i32 %vl) @@ -4275,8 +4275,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(i32* %base, %index, i32 %vl) @@ -4305,8 +4305,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4335,8 +4335,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4365,8 +4365,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4395,8 +4395,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4407,12 +4407,12 @@ define @test_vluxseg3_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4427,8 +4427,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4439,12 +4439,12 @@ define @test_vluxseg3_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4459,8 +4459,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4490,8 +4490,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} 
@llvm.riscv.vluxseg4.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4502,13 +4502,13 @@ define @test_vluxseg4_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4523,8 +4523,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4535,13 +4535,13 @@ define @test_vluxseg4_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4556,8 +4556,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4568,13 +4568,13 @@ define @test_vluxseg4_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4589,8 +4589,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret 
entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4601,17 +4601,17 @@ define @test_vluxseg5_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4623,8 +4623,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4635,14 +4635,14 @@ define @test_vluxseg5_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4657,8 +4657,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4669,14 +4669,14 @@ define @test_vluxseg5_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t +; 
CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4691,8 +4691,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4703,15 +4703,15 @@ define @test_vluxseg6_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4726,8 +4726,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4738,15 +4738,15 @@ define @test_vluxseg6_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4761,8 +4761,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4773,15 +4773,15 @@ define @test_vluxseg6_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v 
v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4796,8 +4796,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4808,16 +4808,16 @@ define @test_vluxseg7_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4832,8 +4832,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4844,16 +4844,16 @@ define @test_vluxseg7_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4868,8 +4868,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4880,16 +4880,16 @@ define @test_vluxseg7_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4904,8 +4904,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4941,8 +4941,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4978,8 +4978,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -5015,8 +5015,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5045,8 +5045,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5075,8 +5075,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), 
v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5105,8 +5105,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5117,12 +5117,12 @@ define @test_vluxseg3_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5137,8 +5137,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5149,12 +5149,12 @@ define @test_vluxseg3_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5169,8 +5169,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5181,12 +5181,12 @@ define @test_vluxseg3_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5201,8 
+5201,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5213,13 +5213,13 @@ define @test_vluxseg4_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5234,8 +5234,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5246,13 +5246,13 @@ define @test_vluxseg4_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5267,8 +5267,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5279,13 +5279,13 @@ define @test_vluxseg4_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, 
i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5300,8 +5300,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5312,14 +5312,14 @@ define @test_vluxseg5_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5334,8 +5334,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5346,14 +5346,14 @@ define @test_vluxseg5_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5368,8 +5368,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5380,14 +5380,14 @@ define @test_vluxseg5_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: 
vluxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5402,8 +5402,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5414,15 +5414,15 @@ define @test_vluxseg6_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5437,8 +5437,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5449,15 +5449,15 @@ define @test_vluxseg6_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5472,8 +5472,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5484,15 +5484,15 @@ define @test_vluxseg6_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: 
test_vluxseg6_mask_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -5507,8 +5507,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl)
@@ -5519,16 +5519,16 @@
 define @test_vluxseg7_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -5543,8 +5543,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl)
@@ -5555,16 +5555,16 @@
 define @test_vluxseg7_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -5579,8 +5579,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl)
@@ -5591,16 +5591,16 @@
 define @test_vluxseg7_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -5615,8 +5615,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl)
@@ -5652,8 +5652,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl)
@@ -5689,8 +5689,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl)
@@ -5726,8 +5726,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv32i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vluxseg2ei16.v v16, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v20
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(i8* %base, %index, i32 %vl)
@@ -5756,8 +5756,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv32i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(i8* %base, %index, i32 %vl)
@@ -5786,8 +5786,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl)
@@ -5816,8 +5816,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl)
@@ -5846,8 +5846,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl)
@@ -5876,8 +5876,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl)
@@ -5888,12 +5888,12 @@
 define @test_vluxseg3_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -5908,8 +5908,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl)
@@ -5920,12 +5920,12 @@
 define @test_vluxseg3_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -5940,8 +5940,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl)
@@ -5952,12 +5952,12 @@
 define @test_vluxseg3_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -5972,8 +5972,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl)
@@ -5984,13 +5984,13 @@
 define @test_vluxseg4_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6005,8 +6005,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl)
@@ -6017,13 +6017,13 @@
 define @test_vluxseg4_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6038,8 +6038,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl)
@@ -6050,13 +6050,13 @@
 define @test_vluxseg4_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6071,8 +6071,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl)
@@ -6083,14 +6083,14 @@
 define @test_vluxseg5_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6105,8 +6105,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl)
@@ -6117,14 +6117,14 @@
 define @test_vluxseg5_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6139,8 +6139,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl)
@@ -6151,14 +6151,14 @@
 define @test_vluxseg5_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6173,8 +6173,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl)
@@ -6185,15 +6185,15 @@
 define @test_vluxseg6_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6208,8 +6208,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl)
@@ -6220,15 +6220,15 @@
 define @test_vluxseg6_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6243,8 +6243,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl)
@@ -6255,15 +6255,15 @@
 define @test_vluxseg6_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6278,8 +6278,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl)
@@ -6290,16 +6290,16 @@
 define @test_vluxseg7_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6314,8 +6314,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl)
@@ -6326,16 +6326,16 @@
 define @test_vluxseg7_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6350,8 +6350,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl)
@@ -6362,16 +6362,16 @@
 define @test_vluxseg7_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1)
@@ -6386,8 +6386,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl)
@@ -6423,8 +6423,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl)
@@ -6460,8 +6460,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl)
@@ -6497,8 +6497,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl)
@@ -6527,8 +6527,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl)
@@ -6557,8 +6557,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl)
@@ -6587,8 +6587,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl)
@@ -6599,12 +6599,12 @@
 define @test_vluxseg3_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6619,8 +6619,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl)
@@ -6631,12 +6631,12 @@
 define @test_vluxseg3_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6651,8 +6651,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl)
@@ -6663,12 +6663,12 @@
 define @test_vluxseg3_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6683,8 +6683,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl)
@@ -6695,13 +6695,13 @@
 define @test_vluxseg4_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6716,8 +6716,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl)
@@ -6728,13 +6728,13 @@
 define @test_vluxseg4_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6749,8 +6749,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl)
@@ -6761,13 +6761,13 @@
 define @test_vluxseg4_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6782,8 +6782,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl)
@@ -6794,14 +6794,14 @@
 define @test_vluxseg5_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6816,8 +6816,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl)
@@ -6828,14 +6828,14 @@
 define @test_vluxseg5_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6850,8 +6850,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl)
@@ -6862,14 +6862,14 @@
 define @test_vluxseg5_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6884,8 +6884,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl)
@@ -6896,15 +6896,15 @@
 define @test_vluxseg6_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6919,8 +6919,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl)
@@ -6931,15 +6931,15 @@
 define @test_vluxseg6_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6954,8 +6954,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl)
@@ -6966,15 +6966,15 @@
 define @test_vluxseg6_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -6989,8 +6989,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl)
@@ -7001,16 +7001,16 @@
 define @test_vluxseg7_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -7025,8 +7025,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl)
@@ -7037,16 +7037,16 @@
 define @test_vluxseg7_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -7061,8 +7061,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl)
@@ -7073,16 +7073,16 @@
 define @test_vluxseg7_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1)
@@ -7097,8 +7097,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl)
@@ -7134,8 +7134,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl)
@@ -7171,8 +7171,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl)
@@ -7208,8 +7208,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl)
@@ -7238,8 +7238,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl)
@@ -7268,8 +7268,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl)
@@ -7298,8 +7298,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl)
@@ -7310,12 +7310,12 @@
 define @test_vluxseg3_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -7330,8 +7330,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl)
@@ -7342,12 +7342,12 @@
 define @test_vluxseg3_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -7362,8 +7362,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl)
@@ -7374,12 +7374,12 @@
 define @test_vluxseg3_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1)
@@ -7394,8 +7394,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl)
@@ -7427,8 +7427,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl)
@@ -7460,8 +7460,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl)
@@ -7493,8 +7493,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16(half* %base, %index, i32 %vl)
@@ -7523,8 +7523,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8(half* %base, %index, i32 %vl)
@@ -7553,8 +7553,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v20
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32(half* %base, %index, i32 %vl)
@@ -7583,8 +7583,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16(double* %base, %index, i32 %vl)
@@ -7613,8 +7613,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8(double* %base, %index, i32 %vl)
@@ -7643,8 +7643,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32(double* %base, %index, i32 %vl)
@@ -7673,8 +7673,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8(double* %base, %index, i32 %vl)
@@ -7703,8 +7703,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32(double* %base, %index, i32 %vl)
@@ -7733,8 +7733,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16(double* %base, %index, i32 %vl)
@@ -7763,8 +7763,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i8(double* %base, %index, i32 %vl)
@@ -7775,12 +7775,12 @@
 define @test_vluxseg3_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -7795,8 +7795,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32(double* %base, %index, i32 %vl)
@@ -7807,12 +7807,12 @@
 define @test_vluxseg3_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -7827,8 +7827,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16(double* %base, %index, i32 %vl)
@@ -7839,12 +7839,12 @@
 define @test_vluxseg3_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -7859,8 +7859,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8(double* %base, %index, i32 %vl)
@@ -7871,13 +7871,13 @@
 define @test_vluxseg4_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -7892,8 +7892,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32(double* %base, %index, i32 %vl)
@@ -7904,13 +7904,13 @@
 define @test_vluxseg4_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -7925,8 +7925,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16(double* %base, %index, i32 %vl)
@@ -7937,13 +7937,13 @@
 define @test_vluxseg4_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -7958,8 +7958,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8(double* %base, %index, i32 %vl)
@@ -7970,14 +7970,14 @@
 define @test_vluxseg5_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:
vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7992,8 +7992,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -8004,14 +8004,14 @@ define @test_vluxseg5_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8026,8 +8026,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -8038,14 +8038,14 @@ define @test_vluxseg5_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8060,8 +8060,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -8072,15 +8072,15 @@ define @test_vluxseg6_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; 
CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8095,8 +8095,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -8107,15 +8107,15 @@ define @test_vluxseg6_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8130,8 +8130,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -8142,15 +8142,15 @@ define @test_vluxseg6_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8165,8 +8165,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 
= tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -8177,16 +8177,16 @@ define @test_vluxseg7_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8201,8 +8201,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -8213,16 +8213,16 @@ define @test_vluxseg7_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8237,8 +8237,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -8249,16 +8249,16 @@ define @test_vluxseg7_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8273,8 +8273,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -8310,8 +8310,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -8347,8 +8347,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -8384,8 +8384,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8414,8 +8414,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8444,8 +8444,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8474,8 +8474,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8486,12 +8486,12 @@ define @test_vluxseg3_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v 
v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8506,8 +8506,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8518,12 +8518,12 @@ define @test_vluxseg3_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8538,8 +8538,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8550,12 +8550,12 @@ define @test_vluxseg3_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8570,8 +8570,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8582,13 +8582,13 @@ define @test_vluxseg4_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, 
ta, mu -; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8603,8 +8603,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8615,13 +8615,13 @@ define @test_vluxseg4_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8636,8 +8636,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8648,13 +8648,13 @@ define @test_vluxseg4_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8669,8 +8669,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8681,14 +8681,14 @@ define @test_vluxseg5_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; 
CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8703,8 +8703,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8715,14 +8715,14 @@ define @test_vluxseg5_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8737,8 +8737,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8749,14 +8749,14 @@ define @test_vluxseg5_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8771,8 +8771,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8783,15 +8783,15 @@ define @test_vluxseg6_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: 
test_vluxseg6_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8806,8 +8806,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8818,15 +8818,15 @@ define @test_vluxseg6_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8841,8 +8841,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8853,15 +8853,15 @@ define @test_vluxseg6_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8876,8 +8876,8 @@ ; CHECK-LABEL: 
test_vluxseg7_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8888,16 +8888,16 @@ define @test_vluxseg7_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8912,8 +8912,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8924,16 +8924,16 @@ define @test_vluxseg7_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8948,8 +8948,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8960,16 +8960,16 @@ define @test_vluxseg7_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; 
CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8984,8 +8984,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -9021,8 +9021,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -9058,8 +9058,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -9095,8 +9095,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9125,8 +9125,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9155,8 +9155,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9185,8 +9185,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9197,12 +9197,12 @@ define @test_vluxseg3_mask_nxv1f16_nxv1i8( 
%val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9217,8 +9217,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9229,12 +9229,12 @@ define @test_vluxseg3_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9249,8 +9249,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9261,12 +9261,12 @@ define @test_vluxseg3_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9281,8 +9281,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9293,13 +9293,13 @@ define @test_vluxseg4_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, 
v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9314,8 +9314,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9326,13 +9326,13 @@ define @test_vluxseg4_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9347,8 +9347,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9359,13 +9359,13 @@ define @test_vluxseg4_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9380,8 +9380,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9392,14 +9392,14 @@ define @test_vluxseg5_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: 
test_vluxseg5_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9414,8 +9414,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9426,14 +9426,14 @@ define @test_vluxseg5_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9448,8 +9448,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9460,14 +9460,14 @@ define @test_vluxseg5_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9482,8 +9482,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 
+; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9494,15 +9494,15 @@ define @test_vluxseg6_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9517,8 +9517,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9529,15 +9529,15 @@ define @test_vluxseg6_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9552,8 +9552,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9564,15 +9564,15 @@ define @test_vluxseg6_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: 
vluxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -9587,8 +9587,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i8(half* %base, %index, i32 %vl)
@@ -9599,16 +9599,16 @@
 define @test_vluxseg7_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -9623,8 +9623,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32(half* %base, %index, i32 %vl)
@@ -9635,16 +9635,16 @@
 define @test_vluxseg7_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -9659,8 +9659,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16(half* %base, %index, i32 %vl)
@@ -9671,16 +9671,16 @@
 define @test_vluxseg7_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -9695,8 +9695,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8(half* %base, %index, i32 %vl)
@@ -9732,8 +9732,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32(half* %base, %index, i32 %vl)
@@ -9769,8 +9769,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16(half* %base, %index, i32 %vl)
@@ -9806,8 +9806,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8(float* %base, %index, i32 %vl)
@@ -9836,8 +9836,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32(float* %base, %index, i32 %vl)
@@ -9866,8 +9866,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16(float* %base, %index, i32 %vl)
@@ -9896,8 +9896,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8(float* %base, %index, i32 %vl)
@@ -9908,12 +9908,12 @@
 define @test_vluxseg3_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1)
@@ -9928,8 +9928,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32(float* %base, %index, i32 %vl)
@@ -9940,12 +9940,12 @@
 define @test_vluxseg3_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1)
@@ -9960,8 +9960,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16(float* %base, %index, i32 %vl)
@@ -9972,12 +9972,12 @@
 define @test_vluxseg3_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1)
@@ -9992,8 +9992,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8(float* %base, %index, i32 %vl)
@@ -10004,13 +10004,13 @@
 define @test_vluxseg4_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1)
@@ -10025,8 +10025,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32(float* %base, %index, i32 %vl)
@@ -10037,13 +10037,13 @@
 define @test_vluxseg4_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1)
@@ -10058,8 +10058,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16(float* %base, %index, i32 %vl)
@@ -10070,13 +10070,13 @@
 define @test_vluxseg4_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1)
@@ -10091,8 +10091,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8(float* %base, %index, i32 %vl)
@@ -10103,14 +10103,14 @@
 define @test_vluxseg5_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1)
@@ -10125,8 +10125,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32(float* %base, %index, i32 %vl)
@@ -10137,14 +10137,14 @@
 define @test_vluxseg5_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1)
@@ -10159,8 +10159,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16(float* %base, %index, i32 %vl)
@@ -10171,14 +10171,14 @@
 define @test_vluxseg5_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1)
@@ -10193,8 +10193,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8(float* %base, %index, i32 %vl)
@@ -10205,15 +10205,15 @@
 define @test_vluxseg6_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1)
@@ -10228,8 +10228,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32(float* %base, %index, i32 %vl)
@@ -10240,15 +10240,15 @@
 define @test_vluxseg6_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1)
@@ -10263,8 +10263,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16(float* %base, %index, i32 %vl)
@@ -10275,15 +10275,15 @@
 define @test_vluxseg6_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1)
@@ -10298,8 +10298,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8(float* %base, %index, i32 %vl)
@@ -10310,16 +10310,16 @@
 define @test_vluxseg7_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1)
@@ -10334,8 +10334,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32(float* %base, %index, i32 %vl)
@@ -10346,16 +10346,16 @@
 define @test_vluxseg7_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1)
@@ -10370,8 +10370,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16(float* %base, %index, i32 %vl)
@@ -10382,16 +10382,16 @@
 define @test_vluxseg7_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1)
@@ -10406,8 +10406,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8(float* %base, %index, i32 %vl)
@@ -10443,8 +10443,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32(float* %base, %index, i32 %vl)
@@ -10480,8 +10480,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16(float* %base, %index, i32 %vl)
@@ -10517,8 +10517,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(half* %base, %index, i32 %vl)
@@ -10547,8 +10547,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(half* %base, %index, i32 %vl)
@@ -10577,8 +10577,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(half* %base, %index, i32 %vl)
@@ -10607,8 +10607,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(half* %base, %index, i32 %vl)
@@ -10619,12 +10619,12 @@
 define @test_vluxseg3_mask_nxv8f16_nxv8i16( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -10639,8 +10639,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(half* %base, %index, i32 %vl)
@@ -10651,12 +10651,12 @@
 define @test_vluxseg3_mask_nxv8f16_nxv8i8( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -10671,8 +10671,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(half* %base, %index, i32 %vl)
@@ -10702,8 +10702,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(half* %base, %index, i32 %vl)
@@ -10735,8 +10735,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(half* %base, %index, i32 %vl)
@@ -10768,8 +10768,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(half* %base, %index, i32 %vl)
@@ -10801,8 +10801,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(float* %base, %index, i32 %vl)
@@ -10831,8 +10831,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(float* %base, %index, i32 %vl)
@@ -10861,8 +10861,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(float* %base, %index, i32 %vl)
@@ -10891,8 +10891,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(double* %base, %index, i32 %vl)
@@ -10921,8 +10921,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(double* %base, %index, i32 %vl)
@@ -10951,8 +10951,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(double* %base, %index, i32 %vl)
@@ -10981,8 +10981,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(double* %base, %index, i32 %vl)
@@ -10993,12 +10993,12 @@
 define @test_vluxseg3_mask_nxv2f64_nxv2i32( %val, double* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -11013,8 +11013,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(double* %base, %index, i32 %vl)
@@ -11025,12 +11025,12 @@
 define @test_vluxseg3_mask_nxv2f64_nxv2i8( %val, double* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -11045,8 +11045,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(double* %base, %index, i32 %vl)
@@ -11057,12 +11057,12 @@
 define @test_vluxseg3_mask_nxv2f64_nxv2i16( %val, double* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1)
@@ -11077,8 +11077,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(double* %base, %index, i32 %vl)
@@ -11110,8 +11110,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(double* %base, %index, i32 %vl)
@@ -11143,8 +11143,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(double* %base, %index, i32 %vl)
@@ -11176,8 +11176,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(half* %base, %index, i32 %vl)
@@ -11206,8 +11206,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(half* %base, %index, i32 %vl)
@@ -11236,8 +11236,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(half* %base, %index, i32 %vl)
@@ -11266,8 +11266,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(half* %base, %index, i32 %vl)
@@ -11278,12 +11278,12 @@
 define @test_vluxseg3_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -11298,8 +11298,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(half* %base, %index, i32 %vl)
@@ -11310,12 +11310,12 @@
 define @test_vluxseg3_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -11330,8 +11330,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(half* %base, %index, i32 %vl)
@@ -11361,8 +11361,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(half* %base, %index, i32 %vl)
@@ -11373,13 +11373,13 @@
 define @test_vluxseg4_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -11394,8 +11394,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(half* %base, %index, i32 %vl)
@@ -11406,13 +11406,13 @@
 define @test_vluxseg4_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -11427,8 +11427,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(half* %base, %index, i32 %vl)
@@ -11439,13 +11439,13 @@
 define @test_vluxseg4_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -11460,8 +11460,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(half* %base, %index, i32 %vl)
@@ -11472,14 +11472,14 @@
 define @test_vluxseg5_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -11494,8 +11494,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(half* %base, %index, i32 %vl)
@@ -11506,14 +11506,14 @@
 define @test_vluxseg5_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -11528,8 +11528,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(half* %base, %index, i32 %vl)
@@ -11540,14 +11540,14 @@
 define @test_vluxseg5_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -11562,8 +11562,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(half* %base, %index, i32 %vl)
@@ -11574,15 +11574,15 @@
 define @test_vluxseg6_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -11597,8 +11597,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(half* %base, %index, i32 %vl)
@@ -11609,15 +11609,15 @@
 define @test_vluxseg6_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -11632,8 +11632,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(half* %base, %index, i32 %vl)
@@ -11644,15 +11644,15 @@
 define @test_vluxseg6_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -11667,8 +11667,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(half* %base, %index, i32 %vl)
@@ -11679,16 +11679,16 @@
 define @test_vluxseg7_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -11703,8 +11703,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(half* %base, %index, i32 %vl)
@@ -11715,16 +11715,16 @@
 define @test_vluxseg7_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -11739,8 +11739,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(half* %base, %index, i32 %vl)
@@ -11751,16 +11751,16 @@
 define @test_vluxseg7_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -11775,8 +11775,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(half* %base, %index, i32 %vl)
@@ -11812,8 +11812,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(half* %base, %index, i32 %vl)
@@ -11849,8 +11849,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(half* %base, %index, i32 %vl)
@@ -11886,8 +11886,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32(half* %base, %index, i32 %vl)
@@ -11916,8 +11916,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8(half* %base, %index, i32 %vl)
@@ -11946,8 +11946,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16(half* %base, %index, i32 %vl)
@@ -11976,8 +11976,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32(half* %base, %index, i32 %vl)
@@ -11988,12 +11988,12 @@
 define @test_vluxseg3_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -12008,8 +12008,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8(half* %base, %index, i32 %vl)
@@ -12020,12 +12020,12 @@
 define @test_vluxseg3_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -12040,8 +12040,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16(half* %base, %index, i32 %vl)
@@ -12052,12 +12052,12 @@
 define @test_vluxseg3_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -12072,8 +12072,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32(half* %base, %index, i32 %vl)
@@ -12084,13 +12084,13 @@
 define @test_vluxseg4_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -12105,8 +12105,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8(half* %base, %index, i32 %vl)
@@ -12117,13 +12117,13 @@
 define @test_vluxseg4_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -12138,8 +12138,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16(half* %base, %index, i32 %vl)
@@ -12150,13 +12150,13 @@
 define @test_vluxseg4_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -12171,8 +12171,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32(half* %base, %index, i32 %vl)
@@ -12183,14 +12183,14 @@
 define @test_vluxseg5_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -12205,8 +12205,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8(half* %base, %index, i32 %vl)
@@ -12217,14 +12217,14 @@
 define @test_vluxseg5_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -12239,8 +12239,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16(half* %base, %index, i32 %vl)
@@ -12251,14 +12251,14 @@
 define @test_vluxseg5_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -12273,8 +12273,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32(half* %base, %index, i32 %vl)
@@ -12285,15 +12285,15 @@
 define @test_vluxseg6_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1)
@@ -12308,8 +12308,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8(half* %base, %index, i32 %vl)
@@ -12320,15 +12320,15 @@
 define @test_vluxseg6_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT:
vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12343,8 +12343,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) @@ -12355,15 +12355,15 @@ define @test_vluxseg6_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12378,8 +12378,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) @@ -12390,16 +12390,16 @@ define @test_vluxseg7_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12414,8 +12414,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, 
ta, mu -; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) @@ -12426,16 +12426,16 @@ define @test_vluxseg7_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12450,8 +12450,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) @@ -12462,16 +12462,16 @@ define @test_vluxseg7_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12486,8 +12486,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) @@ -12523,8 +12523,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) @@ -12560,8 +12560,8 @@ ; CHECK-LABEL: 
test_vluxseg8_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) @@ -12597,8 +12597,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) @@ -12627,8 +12627,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(float* %base, %index, i32 %vl) @@ -12657,8 +12657,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(float* %base, %index, i32 %vl) @@ -12687,8 +12687,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) @@ -12699,12 +12699,12 @@ define @test_vluxseg3_mask_nxv4f32_nxv4i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -12719,8 +12719,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(float* %base, %index, i32 %vl) @@ -12731,12 +12731,12 @@ define @test_vluxseg3_mask_nxv4f32_nxv4i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu 
-; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -12751,8 +12751,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(float* %base, %index, i32 %vl) @@ -12763,12 +12763,12 @@ define @test_vluxseg3_mask_nxv4f32_nxv4i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -12783,8 +12783,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) @@ -12816,8 +12816,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8(float* %base, %index, i32 %vl) @@ -12849,8 +12849,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(float* %base, %index, i32 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll @@ -9,8 +9,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16* %base, %index, i64 %vl) @@ -39,8 +39,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v 
v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(i16* %base, %index, i64 %vl) @@ -69,8 +69,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(i16* %base, %index, i64 %vl) @@ -99,8 +99,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) @@ -129,8 +129,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) @@ -159,8 +159,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) @@ -189,8 +189,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) @@ -219,8 +219,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) @@ -231,12 +231,12 @@ define @test_vluxseg3_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -251,8 +251,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: 
%0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) @@ -263,12 +263,12 @@ define @test_vluxseg3_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -283,8 +283,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) @@ -314,8 +314,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) @@ -326,12 +326,12 @@ define @test_vluxseg3_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -346,8 +346,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) @@ -379,8 +379,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) @@ -412,8 +412,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) @@ -445,8 
+445,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) @@ -478,8 +478,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) @@ -508,8 +508,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) @@ -538,8 +538,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) @@ -568,8 +568,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) @@ -599,8 +599,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) @@ -611,12 +611,12 @@ define @test_vluxseg3_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -631,8 +631,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) @@ -662,8 +662,8 @@ ; CHECK-LABEL: 
test_vluxseg4_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) @@ -695,8 +695,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) @@ -728,8 +728,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) @@ -760,8 +760,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -790,8 +790,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -820,8 +820,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -850,8 +850,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -880,8 +880,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -892,12 +892,12 @@ define @test_vluxseg3_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, 
a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -912,8 +912,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -924,12 +924,12 @@ define @test_vluxseg3_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -944,8 +944,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -956,12 +956,12 @@ define @test_vluxseg3_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -976,8 +976,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -988,12 +988,12 @@ define @test_vluxseg3_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, 
v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1008,8 +1008,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1020,13 +1020,13 @@ define @test_vluxseg4_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1041,8 +1041,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1053,13 +1053,13 @@ define @test_vluxseg4_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1074,8 +1074,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1086,13 +1086,13 @@ define @test_vluxseg4_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei16.v 
v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1107,8 +1107,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1119,13 +1119,13 @@ define @test_vluxseg4_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1140,8 +1140,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1152,14 +1152,14 @@ define @test_vluxseg5_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1174,8 +1174,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1186,14 +1186,14 @@ define @test_vluxseg5_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; 
CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1208,8 +1208,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1220,14 +1220,14 @@ define @test_vluxseg5_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1242,8 +1242,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1254,14 +1254,14 @@ define @test_vluxseg5_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1276,8 +1276,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1288,15 +1288,15 @@ define @test_vluxseg6_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v 
v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1311,8 +1311,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1323,15 +1323,15 @@ define @test_vluxseg6_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1346,8 +1346,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1358,15 +1358,15 @@ define @test_vluxseg6_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1381,8 +1381,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu 
-; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl)
@@ -1393,15 +1393,15 @@
 define @test_vluxseg6_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -1416,8 +1416,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl)
@@ -1428,16 +1428,16 @@
 define @test_vluxseg7_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -1452,8 +1452,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1i64_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl)
@@ -1464,16 +1464,16 @@
 define @test_vluxseg7_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -1488,8 +1488,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1i64_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl)
@@ -1500,16 +1500,16 @@
 define @test_vluxseg7_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -1524,8 +1524,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1i64_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl)
@@ -1536,16 +1536,16 @@
 define @test_vluxseg7_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1)
@@ -1560,8 +1560,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl)
@@ -1597,8 +1597,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1i64_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl)
@@ -1634,8 +1634,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1i64_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl)
@@ -1671,8 +1671,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1i64_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl)
@@ -1708,8 +1708,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl)
@@ -1738,8 +1738,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl)
@@ -1768,8 +1768,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl)
@@ -1798,8 +1798,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl)
@@ -1828,8 +1828,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl)
@@ -1840,12 +1840,12 @@
 define @test_vluxseg3_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -1860,8 +1860,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl)
@@ -1872,12 +1872,12 @@
 define @test_vluxseg3_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -1892,8 +1892,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl)
@@ -1904,12 +1904,12 @@
 define @test_vluxseg3_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -1924,8 +1924,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl)
@@ -1936,12 +1936,12 @@
 define @test_vluxseg3_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -1956,8 +1956,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl)
@@ -1968,13 +1968,13 @@
 define @test_vluxseg4_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -1989,8 +1989,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl)
@@ -2001,13 +2001,13 @@
 define @test_vluxseg4_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2022,8 +2022,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl)
@@ -2034,13 +2034,13 @@
 define @test_vluxseg4_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2055,8 +2055,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl)
@@ -2067,13 +2067,13 @@
 define @test_vluxseg4_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2088,8 +2088,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl)
@@ -2100,14 +2100,14 @@
 define @test_vluxseg5_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2122,8 +2122,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl)
@@ -2134,14 +2134,14 @@
 define @test_vluxseg5_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2156,8 +2156,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl)
@@ -2168,14 +2168,14 @@
 define @test_vluxseg5_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2190,8 +2190,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl)
@@ -2202,14 +2202,14 @@
 define @test_vluxseg5_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2224,8 +2224,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl)
@@ -2236,15 +2236,15 @@
 define @test_vluxseg6_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2259,8 +2259,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl)
@@ -2271,15 +2271,15 @@
 define @test_vluxseg6_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2294,8 +2294,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl)
@@ -2306,15 +2306,15 @@
 define @test_vluxseg6_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2329,8 +2329,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl)
@@ -2341,15 +2341,15 @@
 define @test_vluxseg6_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2364,8 +2364,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl)
@@ -2376,16 +2376,16 @@
 define @test_vluxseg7_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2400,8 +2400,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl)
@@ -2412,16 +2412,16 @@
 define @test_vluxseg7_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2436,8 +2436,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl)
@@ -2448,16 +2448,16 @@
 define @test_vluxseg7_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2472,8 +2472,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl)
@@ -2484,16 +2484,16 @@
 define @test_vluxseg7_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1)
@@ -2508,8 +2508,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl)
@@ -2545,8 +2545,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl)
@@ -2582,8 +2582,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl)
@@ -2619,8 +2619,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl)
@@ -2656,8 +2656,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl)
@@ -2686,8 +2686,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl)
@@ -2716,8 +2716,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v18
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl)
@@ -2746,8 +2746,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl)
@@ -2776,8 +2776,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl)
@@ -2788,12 +2788,12 @@
 define @test_vluxseg3_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -2808,8 +2808,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl)
@@ -2820,12 +2820,12 @@
 define @test_vluxseg3_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -2840,8 +2840,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei64.v v16, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v18
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl)
@@ -2871,8 +2871,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl)
@@ -2902,8 +2902,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl)
@@ -2935,8 +2935,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl)
@@ -2968,8 +2968,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v18
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl)
@@ -3000,8 +3000,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl)
@@ -3033,8 +3033,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl)
@@ -3063,8 +3063,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl)
@@ -3093,8 +3093,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl)
@@ -3123,8 +3123,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl)
@@ -3153,8 +3153,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl)
@@ -3184,8 +3184,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl)
@@ -3196,12 +3196,12 @@
 define @test_vluxseg3_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3216,8 +3216,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl)
@@ -3247,8 +3247,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl)
@@ -3259,12 +3259,12 @@
 define @test_vluxseg3_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3279,8 +3279,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl)
@@ -3291,13 +3291,13 @@
 define @test_vluxseg4_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3312,8 +3312,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl)
@@ -3324,13 +3324,13 @@
 define @test_vluxseg4_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3345,8 +3345,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl)
@@ -3377,8 +3377,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl)
@@ -3389,13 +3389,13 @@
 define @test_vluxseg4_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3410,8 +3410,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl)
@@ -3422,14 +3422,14 @@
 define @test_vluxseg5_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3444,8 +3444,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl)
@@ -3456,14 +3456,14 @@
 define @test_vluxseg5_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3478,8 +3478,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl)
@@ -3511,8 +3511,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl)
@@ -3523,14 +3523,14 @@
 define @test_vluxseg5_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3545,8 +3545,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl)
@@ -3557,15 +3557,15 @@
 define @test_vluxseg6_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3580,8 +3580,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl)
@@ -3592,15 +3592,15 @@
 define @test_vluxseg6_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3615,8 +3615,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl)
@@ -3627,15 +3627,15 @@
 define @test_vluxseg6_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vmv1r.v v8, v17
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3650,8 +3650,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl)
@@ -3662,15 +3662,15 @@
 define @test_vluxseg6_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3685,8 +3685,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl)
@@ -3697,16 +3697,16 @@
 define @test_vluxseg7_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3721,8 +3721,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl)
@@ -3733,16 +3733,16 @@
 define @test_vluxseg7_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3757,8 +3757,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl)
@@ -3769,16 +3769,16 @@
 define @test_vluxseg7_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vmv1r.v v8, v17
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3793,8 +3793,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl)
@@ -3805,16 +3805,16 @@
 define @test_vluxseg7_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -3829,8 +3829,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl)
@@ -3866,8 +3866,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl)
@@ -3903,8 +3903,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl)
@@ -3940,8 +3940,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl)
@@ -3977,8 +3977,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl)
@@ -4007,8 +4007,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl)
@@ -4037,8 +4037,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl)
@@ -4067,8 +4067,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl)
@@ -4097,8 +4097,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl)
@@ -4109,12 +4109,12 @@
 define @test_vluxseg3_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4129,8 +4129,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4141,12 +4141,12 @@ define @test_vluxseg3_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4161,8 +4161,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4173,12 +4173,12 @@ define @test_vluxseg3_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4193,8 +4193,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4205,12 +4205,12 @@ define @test_vluxseg3_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, 
a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4225,8 +4225,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4237,13 +4237,13 @@ define @test_vluxseg4_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4258,8 +4258,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4270,13 +4270,13 @@ define @test_vluxseg4_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4291,8 +4291,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4303,13 +4303,13 @@ define @test_vluxseg4_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; 
CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4324,8 +4324,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4336,13 +4336,13 @@ define @test_vluxseg4_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4357,8 +4357,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4369,14 +4369,14 @@ define @test_vluxseg5_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4391,8 +4391,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4403,14 +4403,14 @@ define @test_vluxseg5_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; 
CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4425,8 +4425,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4437,14 +4437,14 @@ define @test_vluxseg5_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4459,8 +4459,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4471,14 +4471,14 @@ define @test_vluxseg5_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4493,8 +4493,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4505,15 +4505,15 
@@ define @test_vluxseg6_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4528,8 +4528,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4540,15 +4540,15 @@ define @test_vluxseg6_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4563,8 +4563,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4575,15 +4575,15 @@ define @test_vluxseg6_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, 
%val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4598,8 +4598,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4610,15 +4610,15 @@ define @test_vluxseg6_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4633,8 +4633,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4645,16 +4645,16 @@ define @test_vluxseg7_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4669,8 +4669,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4681,16 +4681,16 @@ define @test_vluxseg7_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; 
CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4705,8 +4705,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4717,16 +4717,16 @@ define @test_vluxseg7_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4741,8 +4741,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4753,20 +4753,20 @@ define @test_vluxseg7_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) - %1 = extractvalue {,,,,,,} %0, 1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; 
CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) + %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4777,8 +4777,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4814,8 +4814,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4851,8 +4851,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4888,8 +4888,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4925,8 +4925,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -4955,8 +4955,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -4985,8 +4985,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5015,8 +5015,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5045,8 +5045,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i32: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5057,12 +5057,12 @@ define @test_vluxseg3_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5077,8 +5077,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5089,12 +5089,12 @@ define @test_vluxseg3_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5109,8 +5109,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5121,12 +5121,12 @@ define @test_vluxseg3_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5141,8 +5141,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: 
vluxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5172,8 +5172,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5184,13 +5184,13 @@ define @test_vluxseg4_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5205,8 +5205,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5217,13 +5217,13 @@ define @test_vluxseg4_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5238,8 +5238,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5250,13 +5250,13 @@ define @test_vluxseg4_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: 
vluxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5271,8 +5271,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5283,13 +5283,13 @@ define @test_vluxseg4_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5304,8 +5304,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5316,14 +5316,14 @@ define @test_vluxseg5_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5338,8 +5338,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5350,14 +5350,14 @@ define @test_vluxseg5_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: 
vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5372,8 +5372,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5384,14 +5384,14 @@ define @test_vluxseg5_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5406,8 +5406,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5418,14 +5418,14 @@ define @test_vluxseg5_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5440,8 +5440,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5452,15 +5452,15 @@ define @test_vluxseg6_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5475,8 +5475,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5487,15 +5487,15 @@ define @test_vluxseg6_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5510,8 +5510,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5522,15 +5522,15 @@ define @test_vluxseg6_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5545,8 +5545,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5557,15 +5557,15 @@ define @test_vluxseg6_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5580,8 +5580,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5592,16 +5592,16 @@ define @test_vluxseg7_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5616,8 +5616,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5628,16 +5628,16 @@ define @test_vluxseg7_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: 
vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5652,8 +5652,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5664,16 +5664,16 @@ define @test_vluxseg7_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5688,8 +5688,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5700,16 +5700,16 @@ define @test_vluxseg7_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5724,8 +5724,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: 
vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5761,8 +5761,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5798,8 +5798,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5835,8 +5835,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5872,8 +5872,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -5902,8 +5902,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -5932,8 +5932,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -5962,8 +5962,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -5992,8 +5992,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6023,8 +6023,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 
-; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6035,12 +6035,12 @@ define @test_vluxseg3_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6055,8 +6055,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6086,8 +6086,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6117,8 +6117,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6129,13 +6129,13 @@ define @test_vluxseg4_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6150,8 +6150,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6162,13 +6162,13 @@ define @test_vluxseg4_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: 
vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6183,8 +6183,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6215,8 +6215,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6247,8 +6247,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6259,14 +6259,14 @@ define @test_vluxseg5_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6281,8 +6281,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6293,14 +6293,14 @@ define @test_vluxseg5_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, 
ta, mu -; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6315,8 +6315,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6348,8 +6348,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6381,8 +6381,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6393,15 +6393,15 @@ define @test_vluxseg6_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6416,8 +6416,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6428,15 +6428,15 @@ define @test_vluxseg6_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: 
vluxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6451,8 +6451,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6485,8 +6485,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6497,15 +6497,15 @@ define @test_vluxseg6_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6520,8 +6520,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6532,16 +6532,16 @@ define @test_vluxseg7_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6556,8 +6556,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v0, (a0), 
v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6568,16 +6568,16 @@ define @test_vluxseg7_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6592,8 +6592,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6627,8 +6627,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6639,16 +6639,16 @@ define @test_vluxseg7_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei32.v v16, (a0), v12, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6663,8 +6663,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6700,8 +6700,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, 
m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6737,8 +6737,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6773,8 +6773,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6810,8 +6810,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i32(i64* %base, %index, i64 %vl) @@ -6840,8 +6840,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i8(i64* %base, %index, i64 %vl) @@ -6870,8 +6870,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i64(i64* %base, %index, i64 %vl) @@ -6900,8 +6900,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i16(i64* %base, %index, i64 %vl) @@ -6930,8 +6930,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -6960,8 +6960,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -6990,8 +6990,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i64: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7020,8 +7020,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7050,8 +7050,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7081,8 +7081,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7093,12 +7093,12 @@ define @test_vluxseg3_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7113,8 +7113,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7144,8 +7144,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7156,12 +7156,12 @@ define @test_vluxseg3_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 
+; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7176,8 +7176,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7188,13 +7188,13 @@ define @test_vluxseg4_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7209,8 +7209,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7221,13 +7221,13 @@ define @test_vluxseg4_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7242,8 +7242,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7274,8 +7274,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7286,13 +7286,13 @@ define @test_vluxseg4_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl, %mask) { ; 
CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7307,8 +7307,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7319,14 +7319,14 @@ define @test_vluxseg5_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7341,8 +7341,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7353,14 +7353,14 @@ define @test_vluxseg5_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7375,8 +7375,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v8 +; CHECK-NEXT: 
vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7408,8 +7408,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7420,14 +7420,14 @@ define @test_vluxseg5_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7442,8 +7442,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7454,15 +7454,15 @@ define @test_vluxseg6_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7477,8 +7477,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7489,15 +7489,15 @@ define @test_vluxseg6_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; 
CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7512,8 +7512,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7524,15 +7524,15 @@ define @test_vluxseg6_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7547,8 +7547,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7559,15 +7559,15 @@ define @test_vluxseg6_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7582,8 +7582,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call 
{,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7594,16 +7594,16 @@ define @test_vluxseg7_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7618,8 +7618,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7630,16 +7630,16 @@ define @test_vluxseg7_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7654,8 +7654,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7666,16 +7666,16 @@ define @test_vluxseg7_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, 
m1, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v12, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei64.v v16, (a0), v12, v0.t +; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7690,8 +7690,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7702,16 +7702,16 @@ define @test_vluxseg7_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7726,8 +7726,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7763,8 +7763,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7800,8 +7800,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7837,8 +7837,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7874,8 +7874,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: 
vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -7904,8 +7904,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -7934,8 +7934,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -7964,8 +7964,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -7994,8 +7994,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8006,12 +8006,12 @@ define @test_vluxseg3_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8026,8 +8026,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8038,12 +8038,12 @@ define @test_vluxseg3_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: 
%0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8058,8 +8058,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8070,12 +8070,12 @@ define @test_vluxseg3_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8090,8 +8090,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8102,12 +8102,12 @@ define @test_vluxseg3_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8122,8 +8122,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8134,13 +8134,13 @@ define @test_vluxseg4_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 
1) @@ -8155,8 +8155,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8167,13 +8167,13 @@ define @test_vluxseg4_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8188,8 +8188,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8200,13 +8200,13 @@ define @test_vluxseg4_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8221,8 +8221,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8233,13 +8233,13 @@ define @test_vluxseg4_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, 
%index, %mask, i64 %vl, i64 1) @@ -8254,8 +8254,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8266,14 +8266,14 @@ define @test_vluxseg5_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8288,8 +8288,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8300,14 +8300,14 @@ define @test_vluxseg5_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8322,8 +8322,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8334,14 +8334,14 @@ define @test_vluxseg5_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t 
-; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8356,8 +8356,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8368,14 +8368,14 @@ define @test_vluxseg5_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8390,8 +8390,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8402,15 +8402,15 @@ define @test_vluxseg6_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8425,8 +8425,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8437,15 +8437,15 @@ define @test_vluxseg6_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; 
CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8460,8 +8460,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8472,15 +8472,15 @@ define @test_vluxseg6_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8495,8 +8495,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8507,15 +8507,15 @@ define @test_vluxseg6_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8530,8 +8530,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: 
vluxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8542,16 +8542,16 @@ define @test_vluxseg7_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8566,8 +8566,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8578,16 +8578,16 @@ define @test_vluxseg7_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8602,8 +8602,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8614,16 +8614,16 @@ define @test_vluxseg7_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: 
vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8638,8 +8638,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8650,16 +8650,16 @@ define @test_vluxseg7_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8674,8 +8674,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8711,8 +8711,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8748,8 +8748,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8785,8 +8785,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8822,8 +8822,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) @@ -8852,8 +8852,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) @@ -8882,8 +8882,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) @@ -8912,8 +8912,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) @@ -8942,8 +8942,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) @@ -8954,12 +8954,12 @@ define @test_vluxseg3_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8974,8 +8974,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) @@ -8986,12 +8986,12 @@ define @test_vluxseg3_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: 
vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9006,8 +9006,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) @@ -9018,12 +9018,12 @@ define @test_vluxseg3_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9038,8 +9038,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) @@ -9069,8 +9069,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) @@ -9081,13 +9081,13 @@ define @test_vluxseg4_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9102,8 +9102,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) @@ -9114,13 +9114,13 @@ define @test_vluxseg4_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; 
CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9135,8 +9135,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) @@ -9147,13 +9147,13 @@ define @test_vluxseg4_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9168,8 +9168,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) @@ -9180,13 +9180,13 @@ define @test_vluxseg4_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9201,8 +9201,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) @@ -9213,14 +9213,14 @@ define @test_vluxseg5_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9235,8 +9235,8 @@
; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl)
@@ -9247,14 +9247,14 @@
define @test_vluxseg5_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9269,8 +9269,8 @@
; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl)
@@ -9281,14 +9281,14 @@
define @test_vluxseg5_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9303,8 +9303,8 @@
; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl)
@@ -9315,14 +9315,14 @@
define @test_vluxseg5_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9337,8 +9337,8 @@
; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl)
@@ -9349,15 +9349,15 @@
define @test_vluxseg6_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9372,8 +9372,8 @@
; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl)
@@ -9384,15 +9384,15 @@
define @test_vluxseg6_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1)
@@ -9407,8 +9407,8 @@
; CHECK-LABEL:
test_vluxseg6_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) @@ -9419,15 +9419,15 @@ define @test_vluxseg6_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9442,8 +9442,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) @@ -9454,15 +9454,15 @@ define @test_vluxseg6_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9477,8 +9477,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) @@ -9489,16 +9489,16 @@ define @test_vluxseg7_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v 
v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9513,8 +9513,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) @@ -9525,16 +9525,16 @@ define @test_vluxseg7_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9549,8 +9549,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) @@ -9561,16 +9561,16 @@ define @test_vluxseg7_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9585,8 +9585,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8 +; 
CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) @@ -9597,16 +9597,16 @@ define @test_vluxseg7_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9621,8 +9621,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) @@ -9658,8 +9658,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) @@ -9695,8 +9695,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) @@ -9732,8 +9732,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) @@ -9769,8 +9769,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(i32* %base, %index, i64 %vl) @@ -9799,8 +9799,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(i32* %base, %index, i64 %vl) 
@@ -9829,8 +9829,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i64(i32* %base, %index, i64 %vl) @@ -9859,8 +9859,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(i32* %base, %index, i64 %vl) @@ -9889,8 +9889,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(i8* %base, %index, i64 %vl) @@ -9919,8 +9919,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(i8* %base, %index, i64 %vl) @@ -9949,8 +9949,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) @@ -9979,8 +9979,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -10009,8 +10009,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10039,8 +10039,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10069,8 +10069,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} 
@llvm.riscv.vluxseg3.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) @@ -10081,12 +10081,12 @@ define @test_vluxseg3_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10101,8 +10101,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -10113,12 +10113,12 @@ define @test_vluxseg3_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10133,8 +10133,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10145,12 +10145,12 @@ define @test_vluxseg3_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10165,8 +10165,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10196,8 +10196,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i32: ; CHECK: 
# %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) @@ -10208,13 +10208,13 @@ define @test_vluxseg4_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10229,8 +10229,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -10241,13 +10241,13 @@ define @test_vluxseg4_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10262,8 +10262,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10274,13 +10274,13 @@ define @test_vluxseg4_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10295,8 
+10295,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10307,13 +10307,13 @@ define @test_vluxseg4_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10328,8 +10328,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) @@ -10340,14 +10340,14 @@ define @test_vluxseg5_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10362,8 +10362,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -10374,14 +10374,14 @@ define @test_vluxseg5_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, 
v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10396,8 +10396,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10408,14 +10408,14 @@ define @test_vluxseg5_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10430,8 +10430,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10442,14 +10442,14 @@ define @test_vluxseg5_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10464,8 +10464,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) @@ -10476,15 +10476,15 @@ define @test_vluxseg6_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: 
vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10499,8 +10499,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -10511,15 +10511,15 @@ define @test_vluxseg6_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10534,8 +10534,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10546,15 +10546,15 @@ define @test_vluxseg6_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10569,8 +10569,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: 
ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10581,15 +10581,15 @@ define @test_vluxseg6_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10604,8 +10604,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) @@ -10616,16 +10616,16 @@ define @test_vluxseg7_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10640,8 +10640,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -10652,16 +10652,16 @@ define @test_vluxseg7_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: 
vluxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10676,8 +10676,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10688,16 +10688,16 @@ define @test_vluxseg7_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10712,8 +10712,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10724,16 +10724,16 @@ define @test_vluxseg7_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10748,8 +10748,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i32(i16* %base, 
%index, i64 %vl) @@ -10785,8 +10785,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -10822,8 +10822,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10859,8 +10859,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10896,8 +10896,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) @@ -10926,8 +10926,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) @@ -10956,8 +10956,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) @@ -10986,8 +10986,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) @@ -11016,8 +11016,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) @@ -11028,12 +11028,12 @@ define @test_vluxseg3_mask_nxv2i64_nxv2i32( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: 
vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11048,8 +11048,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) @@ -11060,12 +11060,12 @@ define @test_vluxseg3_mask_nxv2i64_nxv2i8( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11080,8 +11080,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) @@ -11092,12 +11092,12 @@ define @test_vluxseg3_mask_nxv2i64_nxv2i16( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11112,8 +11112,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) @@ -11124,12 +11124,12 @@ define @test_vluxseg3_mask_nxv2i64_nxv2i64( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: 
vluxseg3ei64.v v2, (a0), v10, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11144,8 +11144,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) @@ -11177,8 +11177,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) @@ -11210,8 +11210,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) @@ -11243,8 +11243,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) @@ -11276,8 +11276,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16(half* %base, %index, i64 %vl) @@ -11306,8 +11306,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8(half* %base, %index, i64 %vl) @@ -11336,8 +11336,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32(half* %base, %index, i64 %vl) @@ -11366,8 +11366,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32(double* %base, %index, i64 %vl) @@ -11396,8 
+11396,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8(double* %base, %index, i64 %vl) @@ -11426,8 +11426,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i64(double* %base, %index, i64 %vl) @@ -11456,8 +11456,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 +; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16(double* %base, %index, i64 %vl) @@ -11486,8 +11486,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11516,8 +11516,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11546,8 +11546,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11576,8 +11576,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11606,8 +11606,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11618,12 +11618,12 @@ define @test_vluxseg3_mask_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: 
vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11638,8 +11638,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11650,12 +11650,12 @@ define @test_vluxseg3_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11670,8 +11670,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11682,12 +11682,12 @@ define @test_vluxseg3_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11702,8 +11702,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11714,12 +11714,12 @@ define @test_vluxseg3_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: 
vluxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11734,8 +11734,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11746,13 +11746,13 @@ define @test_vluxseg4_mask_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11767,8 +11767,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11779,13 +11779,13 @@ define @test_vluxseg4_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11800,8 +11800,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11812,13 +11812,13 @@ define @test_vluxseg4_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; 
CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11833,8 +11833,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11845,13 +11845,13 @@ define @test_vluxseg4_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11866,8 +11866,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11878,14 +11878,14 @@ define @test_vluxseg5_mask_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11900,8 +11900,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11912,14 +11912,14 @@ define @test_vluxseg5_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; 
CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11934,8 +11934,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11946,14 +11946,14 @@ define @test_vluxseg5_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11968,8 +11968,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11980,14 +11980,14 @@ define @test_vluxseg5_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12002,8 +12002,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} 
@llvm.riscv.vluxseg6.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -12014,15 +12014,15 @@ define @test_vluxseg6_mask_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12037,8 +12037,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -12049,15 +12049,15 @@ define @test_vluxseg6_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12072,8 +12072,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -12084,15 +12084,15 @@ define @test_vluxseg6_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 
; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12107,8 +12107,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -12119,15 +12119,15 @@ define @test_vluxseg6_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12142,8 +12142,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -12154,16 +12154,16 @@ define @test_vluxseg7_mask_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12178,8 +12178,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -12190,16 +12190,16 @@ define @test_vluxseg7_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12214,8 +12214,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -12226,16 +12226,16 @@ define @test_vluxseg7_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12250,8 +12250,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -12262,16 +12262,16 @@ define @test_vluxseg7_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} 
@llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12286,8 +12286,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -12323,8 +12323,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -12360,8 +12360,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -12397,8 +12397,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -12434,8 +12434,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12464,8 +12464,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12494,8 +12494,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12524,8 +12524,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12554,8 +12554,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: 
vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12566,12 +12566,12 @@ define @test_vluxseg3_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12586,8 +12586,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12598,12 +12598,12 @@ define @test_vluxseg3_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12618,8 +12618,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12630,12 +12630,12 @@ define @test_vluxseg3_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12650,8 +12650,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12681,8 +12681,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12693,13 +12693,13 @@ define @test_vluxseg4_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12714,8 +12714,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12726,13 +12726,13 @@ define @test_vluxseg4_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12747,8 +12747,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12759,13 +12759,13 @@ define @test_vluxseg4_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t +; 
CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -12780,8 +12780,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i64(float* %base, %index, i64 %vl)
@@ -12792,13 +12792,13 @@ define @test_vluxseg4_mask_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -12813,8 +12813,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32(float* %base, %index, i64 %vl)
@@ -12825,14 +12825,14 @@ define @test_vluxseg5_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -12847,8 +12847,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8(float* %base, %index, i64 %vl)
@@ -12859,14 +12859,14 @@ define @test_vluxseg5_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -12881,8 +12881,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16(float* %base, %index, i64 %vl)
@@ -12893,14 +12893,14 @@ define @test_vluxseg5_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -12915,8 +12915,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i64(float* %base, %index, i64 %vl)
@@ -12927,14 +12927,14 @@ define @test_vluxseg5_mask_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -12949,8 +12949,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32(float* %base, %index, i64 %vl)
@@ -12961,15 +12961,15 @@ define @test_vluxseg6_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i32: ;
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12984,8 +12984,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12996,15 +12996,15 @@ define @test_vluxseg6_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13019,8 +13019,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -13031,15 +13031,15 @@ define @test_vluxseg6_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13054,8 +13054,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i64: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -13066,15 +13066,15 @@ define @test_vluxseg6_mask_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13089,8 +13089,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -13101,16 +13101,16 @@ define @test_vluxseg7_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13125,8 +13125,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -13137,16 +13137,16 @@ define @test_vluxseg7_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: 
vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13161,8 +13161,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -13173,16 +13173,16 @@ define @test_vluxseg7_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13197,8 +13197,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -13209,16 +13209,16 @@ define @test_vluxseg7_mask_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v10, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13233,8 +13233,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu 
-; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -13270,8 +13270,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -13307,8 +13307,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -13344,8 +13344,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -13381,8 +13381,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -13411,8 +13411,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -13441,8 +13441,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -13471,8 +13471,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -13501,8 +13501,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -13513,12 +13513,12 @@ define 
@test_vluxseg3_mask_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13533,8 +13533,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -13545,12 +13545,12 @@ define @test_vluxseg3_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13565,8 +13565,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -13577,12 +13577,12 @@ define @test_vluxseg3_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13597,8 +13597,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -13609,12 +13609,12 @@ define @test_vluxseg3_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i8: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13629,8 +13629,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -13641,13 +13641,13 @@ define @test_vluxseg4_mask_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13662,8 +13662,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -13674,13 +13674,13 @@ define @test_vluxseg4_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13695,8 +13695,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -13707,13 +13707,13 @@ define @test_vluxseg4_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: 
test_vluxseg4_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13728,8 +13728,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -13740,13 +13740,13 @@ define @test_vluxseg4_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13761,8 +13761,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -13773,14 +13773,14 @@ define @test_vluxseg5_mask_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13795,8 +13795,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i32(half* %base, 
%index, i64 %vl) @@ -13807,14 +13807,14 @@ define @test_vluxseg5_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13829,8 +13829,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -13841,14 +13841,14 @@ define @test_vluxseg5_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13863,8 +13863,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -13875,14 +13875,14 @@ define @test_vluxseg5_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13897,8 +13897,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -13909,15 +13909,15 @@ define @test_vluxseg6_mask_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13932,8 +13932,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -13944,15 +13944,15 @@ define @test_vluxseg6_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13967,8 +13967,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -13979,15 +13979,15 @@ define @test_vluxseg6_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; 
CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14002,8 +14002,8 @@ ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -14014,15 +14014,15 @@ define @test_vluxseg6_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14037,8 +14037,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -14049,16 +14049,16 @@ define @test_vluxseg7_mask_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14073,8 +14073,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} 
@llvm.riscv.vluxseg7.nxv1f16.nxv1i32(half* %base, %index, i64 %vl)
@@ -14085,16 +14085,16 @@ define @test_vluxseg7_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -14109,8 +14109,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16(half* %base, %index, i64 %vl)
@@ -14121,16 +14121,16 @@ define @test_vluxseg7_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -14145,8 +14145,8 @@ ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i8(half* %base, %index, i64 %vl)
@@ -14157,16 +14157,16 @@ define @test_vluxseg7_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -14181,8 +14181,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i64(half* %base, %index, i64 %vl)
@@ -14218,8 +14218,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32(half* %base, %index, i64 %vl)
@@ -14255,8 +14255,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16(half* %base, %index, i64 %vl)
@@ -14292,8 +14292,8 @@ ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8(half* %base, %index, i64 %vl)
@@ -14329,8 +14329,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i64(float* %base, %index, i64 %vl)
@@ -14359,8 +14359,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32(float* %base, %index, i64 %vl)
@@ -14389,8 +14389,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16(float* %base, %index, i64 %vl)
@@ -14419,8 +14419,8 @@ ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,}
@llvm.riscv.vluxseg2.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -14449,8 +14449,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -14461,12 +14461,12 @@ define @test_vluxseg3_mask_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14481,8 +14481,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -14493,12 +14493,12 @@ define @test_vluxseg3_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14513,8 +14513,8 @@ ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -14525,12 +14525,12 @@ define @test_vluxseg3_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14545,8 +14545,8 @@ ; CHECK-LABEL: 
test_vluxseg3_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -14557,12 +14557,12 @@ define @test_vluxseg3_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14577,8 +14577,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -14589,13 +14589,13 @@ define @test_vluxseg4_mask_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14610,8 +14610,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -14622,13 +14622,13 @@ define @test_vluxseg4_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14643,8 +14643,8 @@ ; 
CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -14655,13 +14655,13 @@ define @test_vluxseg4_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14676,8 +14676,8 @@ ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -14688,13 +14688,13 @@ define @test_vluxseg4_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14709,8 +14709,8 @@ ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -14721,14 +14721,14 @@ define @test_vluxseg5_mask_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: %0 = tail call 
@@ -14743,8 +14743,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32(float* %base, %index, i64 %vl)
@@ -14755,14 +14755,14 @@
 define @test_vluxseg5_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -14777,8 +14777,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16(float* %base, %index, i64 %vl)
@@ -14789,14 +14789,14 @@
 define @test_vluxseg5_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -14811,8 +14811,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8(float* %base, %index, i64 %vl)
@@ -14823,14 +14823,14 @@
 define @test_vluxseg5_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -14845,8 +14845,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i64(float* %base, %index, i64 %vl)
@@ -14857,15 +14857,15 @@
 define @test_vluxseg6_mask_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -14880,8 +14880,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32(float* %base, %index, i64 %vl)
@@ -14892,15 +14892,15 @@
 define @test_vluxseg6_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -14915,8 +14915,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16(float* %base, %index, i64 %vl)
@@ -14927,15 +14927,15 @@
 define @test_vluxseg6_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -14950,8 +14950,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8(float* %base, %index, i64 %vl)
@@ -14962,15 +14962,15 @@
 define @test_vluxseg6_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -14985,8 +14985,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i64(float* %base, %index, i64 %vl)
@@ -14997,16 +14997,16 @@
 define @test_vluxseg7_mask_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -15021,8 +15021,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32(float* %base, %index, i64 %vl)
@@ -15033,16 +15033,16 @@
 define @test_vluxseg7_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -15057,8 +15057,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16(float* %base, %index, i64 %vl)
@@ -15069,16 +15069,16 @@
 define @test_vluxseg7_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -15093,8 +15093,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8(float* %base, %index, i64 %vl)
@@ -15105,16 +15105,16 @@
 define @test_vluxseg7_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -15129,8 +15129,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i64(float* %base, %index, i64 %vl)
@@ -15166,8 +15166,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32(float* %base, %index, i64 %vl)
@@ -15203,8 +15203,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16(float* %base, %index, i64 %vl)
@@ -15240,8 +15240,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8(float* %base, %index, i64 %vl)
@@ -15277,8 +15277,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(half* %base, %index, i64 %vl)
@@ -15307,8 +15307,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(half* %base, %index, i64 %vl)
@@ -15337,8 +15337,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v18
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i64(half* %base, %index, i64 %vl)
@@ -15367,8 +15367,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(half* %base, %index, i64 %vl)
@@ -15397,8 +15397,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(half* %base, %index, i64 %vl)
@@ -15409,12 +15409,12 @@
 define @test_vluxseg3_mask_nxv8f16_nxv8i16( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -15429,8 +15429,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(half* %base, %index, i64 %vl)
@@ -15441,12 +15441,12 @@
 define @test_vluxseg3_mask_nxv8f16_nxv8i8( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -15461,8 +15461,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei64.v v16, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v18
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i64(half* %base, %index, i64 %vl)
@@ -15492,8 +15492,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(half* %base, %index, i64 %vl)
@@ -15523,8 +15523,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(half* %base, %index, i64 %vl)
@@ -15556,8 +15556,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(half* %base, %index, i64 %vl)
@@ -15589,8 +15589,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v18
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i64(half* %base, %index, i64 %vl)
@@ -15621,8 +15621,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(half* %base, %index, i64 %vl)
@@ -15654,8 +15654,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(float* %base, %index, i64 %vl)
@@ -15684,8 +15684,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(float* %base, %index, i64 %vl)
@@ -15714,8 +15714,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v20
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i64(float* %base, %index, i64 %vl)
@@ -15744,8 +15744,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8
+; CHECK-NEXT: vmv4r.v v8, v16
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(float* %base, %index, i64 %vl)
@@ -15774,8 +15774,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(double* %base, %index, i64 %vl)
@@ -15804,8 +15804,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(double* %base, %index, i64 %vl)
@@ -15834,8 +15834,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(double* %base, %index, i64 %vl)
@@ -15864,8 +15864,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i64(double* %base, %index, i64 %vl)
@@ -15894,8 +15894,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(double* %base, %index, i64 %vl)
@@ -15906,12 +15906,12 @@
 define @test_vluxseg3_mask_nxv2f64_nxv2i32( %val, double* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1)
@@ -15926,8 +15926,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(double* %base, %index, i64 %vl)
@@ -15938,12 +15938,12 @@
 define @test_vluxseg3_mask_nxv2f64_nxv2i8( %val, double* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1)
@@ -15958,8 +15958,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(double* %base, %index, i64 %vl)
@@ -15970,12 +15970,12 @@
 define @test_vluxseg3_mask_nxv2f64_nxv2i16( %val, double* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1)
@@ -15990,8 +15990,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i64(double* %base, %index, i64 %vl)
@@ -16002,12 +16002,12 @@
 define @test_vluxseg3_mask_nxv2f64_nxv2i64( %val, double* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1)
@@ -16022,8 +16022,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(double* %base, %index, i64 %vl)
@@ -16055,8 +16055,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(double* %base, %index, i64 %vl)
@@ -16088,8 +16088,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(double* %base, %index, i64 %vl)
@@ -16121,8 +16121,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i64(double* %base, %index, i64 %vl)
@@ -16154,8 +16154,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(half* %base, %index, i64 %vl)
@@ -16184,8 +16184,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(half* %base, %index, i64 %vl)
@@ -16214,8 +16214,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i64(half* %base, %index, i64 %vl)
@@ -16244,8 +16244,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(half* %base, %index, i64 %vl)
@@ -16274,8 +16274,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(half* %base, %index, i64 %vl)
@@ -16305,8 +16305,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(half* %base, %index, i64 %vl)
@@ -16317,12 +16317,12 @@
 define @test_vluxseg3_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16337,8 +16337,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i64(half* %base, %index, i64 %vl)
@@ -16368,8 +16368,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(half* %base, %index, i64 %vl)
@@ -16380,12 +16380,12 @@
 define @test_vluxseg3_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16400,8 +16400,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(half* %base, %index, i64 %vl)
@@ -16412,13 +16412,13 @@
 define @test_vluxseg4_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16433,8 +16433,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(half* %base, %index, i64 %vl)
@@ -16445,13 +16445,13 @@
 define @test_vluxseg4_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16466,8 +16466,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i64(half* %base, %index, i64 %vl)
@@ -16498,8 +16498,8 @@
 ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(half* %base, %index, i64 %vl)
@@ -16510,13 +16510,13 @@
 define @test_vluxseg4_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16531,8 +16531,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(half* %base, %index, i64 %vl)
@@ -16543,14 +16543,14 @@
 define @test_vluxseg5_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16565,8 +16565,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(half* %base, %index, i64 %vl)
@@ -16577,14 +16577,14 @@
 define @test_vluxseg5_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16599,8 +16599,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i64(half* %base, %index, i64 %vl)
@@ -16632,8 +16632,8 @@
 ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(half* %base, %index, i64 %vl)
@@ -16644,14 +16644,14 @@
 define @test_vluxseg5_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16666,8 +16666,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(half* %base, %index, i64 %vl)
@@ -16678,15 +16678,15 @@
 define @test_vluxseg6_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16701,8 +16701,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(half* %base, %index, i64 %vl)
@@ -16713,15 +16713,15 @@
 define @test_vluxseg6_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16736,8 +16736,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i64(half* %base, %index, i64 %vl)
@@ -16748,15 +16748,15 @@
 define @test_vluxseg6_mask_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vmv1r.v v8, v17
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16771,8 +16771,8 @@
 ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(half* %base, %index, i64 %vl)
@@ -16783,15 +16783,15 @@
 define @test_vluxseg6_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16806,8 +16806,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(half* %base, %index, i64 %vl)
@@ -16818,16 +16818,16 @@
 define @test_vluxseg7_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16842,8 +16842,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(half* %base, %index, i64 %vl)
@@ -16854,16 +16854,16 @@
 define @test_vluxseg7_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16878,8 +16878,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i64(half* %base, %index, i64 %vl)
@@ -16890,16 +16890,16 @@
 define @test_vluxseg7_mask_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vmv1r.v v8, v17
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16914,8 +16914,8 @@
 ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(half* %base, %index, i64 %vl)
@@ -16926,16 +16926,16 @@
 define @test_vluxseg7_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -16950,8 +16950,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(half* %base, %index, i64 %vl)
@@ -16987,8 +16987,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(half* %base, %index, i64 %vl)
@@ -17024,8 +17024,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i64(half* %base, %index, i64 %vl)
@@ -17061,8 +17061,8 @@
 ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(half* %base, %index, i64 %vl)
@@ -17098,8 +17098,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32(half* %base, %index, i64 %vl)
@@ -17128,8 +17128,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8(half* %base, %index, i64 %vl)
@@ -17158,8 +17158,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16(half* %base, %index, i64 %vl)
@@ -17188,8 +17188,8 @@
 ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i64(half* %base, %index, i64 %vl)
@@ -17218,8 +17218,8 @@
 ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32(half* %base, %index, i64 %vl)
@@ -17230,12 +17230,12 @@
 define @test_vluxseg3_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17250,8 +17250,8 @@
; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8(half* %base, %index, i64 %vl)
@@ -17262,12 +17262,12 @@
define @test_vluxseg3_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17282,8 +17282,8 @@
; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16(half* %base, %index, i64 %vl)
@@ -17294,12 +17294,12 @@
define @test_vluxseg3_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17314,8 +17314,8 @@
; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i64(half* %base, %index, i64 %vl)
@@ -17345,8 +17345,8 @@
; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32(half* %base, %index, i64 %vl)
@@ -17357,13 +17357,13 @@
define @test_vluxseg4_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17378,8 +17378,8 @@
; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8(half* %base, %index, i64 %vl)
@@ -17390,13 +17390,13 @@
define @test_vluxseg4_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17411,8 +17411,8 @@
; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16(half* %base, %index, i64 %vl)
@@ -17423,13 +17423,13 @@
define @test_vluxseg4_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17444,8 +17444,8 @@
; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i64(half* %base, %index, i64 %vl)
@@ -17456,13 +17456,13 @@
define @test_vluxseg4_mask_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17477,8 +17477,8 @@
; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32(half* %base, %index, i64 %vl)
@@ -17489,14 +17489,14 @@
define @test_vluxseg5_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17511,8 +17511,8 @@
; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8(half* %base, %index, i64 %vl)
@@ -17523,14 +17523,14 @@
define @test_vluxseg5_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17545,8 +17545,8 @@
; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16(half* %base, %index, i64 %vl)
@@ -17557,14 +17557,14 @@
define @test_vluxseg5_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17579,8 +17579,8 @@
; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i64(half* %base, %index, i64 %vl)
@@ -17591,14 +17591,14 @@
define @test_vluxseg5_mask_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17613,8 +17613,8 @@
; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32(half* %base, %index, i64 %vl)
@@ -17625,15 +17625,15 @@
define @test_vluxseg6_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17648,8 +17648,8 @@
; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8(half* %base, %index, i64 %vl)
@@ -17660,15 +17660,15 @@
define @test_vluxseg6_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17683,8 +17683,8 @@
; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16(half* %base, %index, i64 %vl)
@@ -17695,15 +17695,15 @@
define @test_vluxseg6_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17718,8 +17718,8 @@
; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i64(half* %base, %index, i64 %vl)
@@ -17730,15 +17730,15 @@
define @test_vluxseg6_mask_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17753,8 +17753,8 @@
; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32(half* %base, %index, i64 %vl)
@@ -17765,16 +17765,16 @@
define @test_vluxseg7_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17789,8 +17789,8 @@
; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8(half* %base, %index, i64 %vl)
@@ -17801,16 +17801,16 @@
define @test_vluxseg7_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17825,8 +17825,8 @@
; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16(half* %base, %index, i64 %vl)
@@ -17837,16 +17837,16 @@
define @test_vluxseg7_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17861,8 +17861,8 @@
; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i64(half* %base, %index, i64 %vl)
@@ -17873,16 +17873,16 @@
define @test_vluxseg7_mask_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1)
@@ -17897,8 +17897,8 @@
; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32(half* %base, %index, i64 %vl)
@@ -17934,8 +17934,8 @@
; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8(half* %base, %index, i64 %vl)
@@ -17971,8 +17971,8 @@
; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16(half* %base, %index, i64 %vl)
@@ -18008,8 +18008,8 @@
; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i64(half* %base, %index, i64 %vl)
@@ -18045,8 +18045,8 @@
; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(float* %base, %index, i64 %vl)
@@ -18075,8 +18075,8 @@
; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(float* %base, %index, i64 %vl)
@@ -18105,8 +18105,8 @@
; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i64(float* %base, %index, i64 %vl)
@@ -18135,8 +18135,8 @@
; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(float* %base, %index, i64 %vl)
@@ -18165,8 +18165,8 @@
; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(float* %base, %index, i64 %vl)
@@ -18177,12 +18177,12 @@
define @test_vluxseg3_mask_nxv4f32_nxv4i32( %val, float* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -18197,8 +18197,8 @@
; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(float* %base, %index, i64 %vl)
@@ -18209,12 +18209,12 @@
define @test_vluxseg3_mask_nxv4f32_nxv4i8( %val, float* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -18229,8 +18229,8 @@
; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i64(float* %base, %index, i64 %vl)
@@ -18260,8 +18260,8 @@
; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(float* %base, %index, i64 %vl)
@@ -18272,12 +18272,12 @@
define @test_vluxseg3_mask_nxv4f32_nxv4i16( %val, float* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1)
@@ -18292,8 +18292,8 @@
; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(float* %base, %index, i64 %vl)
@@ -18325,8 +18325,8 @@
; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8(float* %base, %index, i64 %vl)
@@ -18358,8 +18358,8 @@
; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i64(float* %base, %index, i64 %vl)
@@ -18391,8 +18391,8 @@
; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(float* %base, %index, i64 %vl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll
@@ -192,11 +192,11 @@
define void @test_vsoxseg3_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -206,11 +206,11 @@
define void @test_vsoxseg3_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -223,11 +223,11 @@
define void @test_vsoxseg3_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -237,11 +237,11 @@
define void @test_vsoxseg3_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -254,11 +254,11 @@
define void @test_vsoxseg3_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -268,11 +268,11 @@
define void @test_vsoxseg3_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -285,12 +285,12 @@
define void @test_vsoxseg4_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -300,12 +300,12 @@
define void @test_vsoxseg4_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -318,12 +318,12 @@
define void @test_vsoxseg4_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -333,12 +333,12 @@
define void @test_vsoxseg4_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -351,12 +351,12 @@
define void @test_vsoxseg4_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -366,12 +366,12 @@
define void @test_vsoxseg4_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -384,13 +384,13 @@
define void @test_vsoxseg5_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -400,13 +400,13 @@
define void @test_vsoxseg5_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -419,13 +419,13 @@
define void @test_vsoxseg5_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -435,13 +435,13 @@
define void @test_vsoxseg5_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -454,13 +454,13 @@
define void @test_vsoxseg5_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -470,13 +470,13 @@
define void @test_vsoxseg5_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -489,14 +489,14 @@
define void @test_vsoxseg6_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -506,14 +506,14 @@
define void @test_vsoxseg6_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -526,14 +526,14 @@
define void @test_vsoxseg6_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -543,14 +543,14 @@
define void @test_vsoxseg6_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -563,14 +563,14 @@
define void @test_vsoxseg6_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -580,14 +580,14 @@
define void @test_vsoxseg6_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -600,15 +600,15 @@
define void @test_vsoxseg7_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -618,15 +618,15 @@
define void @test_vsoxseg7_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -639,15 +639,15 @@
define void @test_vsoxseg7_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -657,15 +657,15 @@
define void @test_vsoxseg7_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -678,15 +678,15 @@
define void @test_vsoxseg7_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -696,15 +696,15 @@
define void @test_vsoxseg7_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -717,16 +717,16 @@
define void @test_vsoxseg8_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -736,16 +736,16 @@
define void @test_vsoxseg8_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -758,16 +758,16 @@
define void @test_vsoxseg8_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -777,16 +777,16 @@
define void @test_vsoxseg8_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -799,16 +799,16 @@
define void @test_vsoxseg8_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i16:
# %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -818,16 +818,16 @@ define void @test_vsoxseg8_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -929,11 +929,11 @@ define void @test_vsoxseg3_nxv16i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v12 +; CHECK-NEXT: vsoxseg3ei16.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -943,11 +943,11 @@ define void @test_vsoxseg3_mask_nxv16i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v12, v0.t +; CHECK-NEXT: vsoxseg3ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -960,11 +960,11 @@ define void @test_vsoxseg3_nxv16i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: 
vsoxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -974,11 +974,11 @@ define void @test_vsoxseg3_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -1022,12 +1022,12 @@ define void @test_vsoxseg4_nxv16i8_nxv16i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v12 +; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -1037,12 +1037,12 @@ define void @test_vsoxseg4_mask_nxv16i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v12, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -1055,12 +1055,12 @@ define void @test_vsoxseg4_nxv16i8_nxv16i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -1070,12 +1070,12 @@ define void @test_vsoxseg4_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -1214,11 +1214,11 @@ define void @test_vsoxseg3_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1228,11 +1228,11 @@ define void @test_vsoxseg3_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1245,11 +1245,11 @@ define void @test_vsoxseg3_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1259,11 +1259,11 @@ define void @test_vsoxseg3_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1276,11 +1276,11 @@ define void @test_vsoxseg3_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1290,11 +1290,11 @@ define void @test_vsoxseg3_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; 
CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1307,12 +1307,12 @@ define void @test_vsoxseg4_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1322,12 +1322,12 @@ define void @test_vsoxseg4_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1340,12 +1340,12 @@ define void @test_vsoxseg4_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1355,12 +1355,12 @@ define void @test_vsoxseg4_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1373,12 +1373,12 @@ define void @test_vsoxseg4_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: 
vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1388,12 +1388,12 @@ define void @test_vsoxseg4_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1406,13 +1406,13 @@ define void @test_vsoxseg5_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1422,13 +1422,13 @@ define void @test_vsoxseg5_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1441,13 +1441,13 @@ define void @test_vsoxseg5_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1457,13 +1457,13 @@ define void @test_vsoxseg5_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: 
test_vsoxseg5_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1476,13 +1476,13 @@ define void @test_vsoxseg5_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1492,13 +1492,13 @@ define void @test_vsoxseg5_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1511,14 +1511,14 @@ define void @test_vsoxseg6_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1528,14 +1528,14 @@ define void @test_vsoxseg6_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; 
CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1548,14 +1548,14 @@ define void @test_vsoxseg6_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1565,14 +1565,14 @@ define void @test_vsoxseg6_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1585,14 +1585,14 @@ define void @test_vsoxseg6_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1602,14 +1602,14 @@ define void @test_vsoxseg6_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: 
vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1622,15 +1622,15 @@ define void @test_vsoxseg7_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1640,15 +1640,15 @@ define void @test_vsoxseg7_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1661,15 +1661,15 @@ define void @test_vsoxseg7_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1679,15 +1679,15 @@ define void @test_vsoxseg7_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: 
vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1700,15 +1700,15 @@ define void @test_vsoxseg7_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1718,15 +1718,15 @@ define void @test_vsoxseg7_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1739,16 +1739,16 @@ define void @test_vsoxseg8_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1758,16 +1758,16 @@ define void @test_vsoxseg8_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; 
CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1780,16 +1780,16 @@ define void @test_vsoxseg8_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1799,16 +1799,16 @@ define void @test_vsoxseg8_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1821,16 +1821,16 @@ define void @test_vsoxseg8_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1840,16 +1840,16 @@ define void @test_vsoxseg8_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, 
%mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1953,11 +1953,11 @@ define void @test_vsoxseg3_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -1967,11 +1967,11 @@ define void @test_vsoxseg3_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -1984,11 +1984,11 @@ define void @test_vsoxseg3_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -1998,11 +1998,11 @@ define void @test_vsoxseg3_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2015,11 +2015,11 @@ define void @test_vsoxseg3_nxv4i16_nxv4i32( %val, i16* %base, 
%index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2029,11 +2029,11 @@ define void @test_vsoxseg3_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2046,12 +2046,12 @@ define void @test_vsoxseg4_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2061,12 +2061,12 @@ define void @test_vsoxseg4_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2079,12 +2079,12 @@ define void @test_vsoxseg4_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2094,12 +2094,12 @@ define void @test_vsoxseg4_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: 
vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2112,12 +2112,12 @@ define void @test_vsoxseg4_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2127,12 +2127,12 @@ define void @test_vsoxseg4_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2145,13 +2145,13 @@ define void @test_vsoxseg5_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2161,13 +2161,13 @@ define void @test_vsoxseg5_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2180,13 +2180,13 @@ define void @test_vsoxseg5_nxv4i16_nxv4i8( %val, i16* 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2196,13 +2196,13 @@ define void @test_vsoxseg5_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2215,13 +2215,13 @@ define void @test_vsoxseg5_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2231,13 +2231,13 @@ define void @test_vsoxseg5_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2250,14 +2250,14 @@ define void @test_vsoxseg6_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; 
CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2267,14 +2267,14 @@ define void @test_vsoxseg6_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2287,14 +2287,14 @@ define void @test_vsoxseg6_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2304,14 +2304,14 @@ define void @test_vsoxseg6_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2324,14 +2324,14 @@ define void @test_vsoxseg6_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10 ; 
CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2341,14 +2341,14 @@ define void @test_vsoxseg6_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2361,15 +2361,15 @@ define void @test_vsoxseg7_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2379,15 +2379,15 @@ define void @test_vsoxseg7_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2400,15 +2400,15 @@ define void @test_vsoxseg7_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; 
CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2418,15 +2418,15 @@
 define void @test_vsoxseg7_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2439,15 +2439,15 @@
 define void @test_vsoxseg7_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2457,15 +2457,15 @@
 define void @test_vsoxseg7_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2478,16 +2478,16 @@
 define void @test_vsoxseg8_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2497,16 +2497,16 @@
 define void @test_vsoxseg8_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2519,16 +2519,16 @@
 define void @test_vsoxseg8_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2538,16 +2538,16 @@
 define void @test_vsoxseg8_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2560,16 +2560,16 @@
 define void @test_vsoxseg8_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2579,16 +2579,16 @@
 define void @test_vsoxseg8_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2694,11 +2694,11 @@
 define void @test_vsoxseg3_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -2708,11 +2708,11 @@
 define void @test_vsoxseg3_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -2725,11 +2725,11 @@
 define void @test_vsoxseg3_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -2739,11 +2739,11 @@
 define void @test_vsoxseg3_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -2756,11 +2756,11 @@
 define void @test_vsoxseg3_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -2770,11 +2770,11 @@
 define void @test_vsoxseg3_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -2787,12 +2787,12 @@
 define void @test_vsoxseg4_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -2802,12 +2802,12 @@
 define void @test_vsoxseg4_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -2820,12 +2820,12 @@
 define void @test_vsoxseg4_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -2835,12 +2835,12 @@
 define void @test_vsoxseg4_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -2853,12 +2853,12 @@
 define void @test_vsoxseg4_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -2868,12 +2868,12 @@
 define void @test_vsoxseg4_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -2886,13 +2886,13 @@
 define void @test_vsoxseg5_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -2902,13 +2902,13 @@
 define void @test_vsoxseg5_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -2921,13 +2921,13 @@
 define void @test_vsoxseg5_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -2937,13 +2937,13 @@
 define void @test_vsoxseg5_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -2956,13 +2956,13 @@
 define void @test_vsoxseg5_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -2972,13 +2972,13 @@
 define void @test_vsoxseg5_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -2991,14 +2991,14 @@
 define void @test_vsoxseg6_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -3008,14 +3008,14 @@
 define void @test_vsoxseg6_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -3028,14 +3028,14 @@
 define void @test_vsoxseg6_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -3045,14 +3045,14 @@
 define void @test_vsoxseg6_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -3065,14 +3065,14 @@
 define void @test_vsoxseg6_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -3082,14 +3082,14 @@
 define void @test_vsoxseg6_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -3102,15 +3102,15 @@
 define void @test_vsoxseg7_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -3120,15 +3120,15 @@
 define void @test_vsoxseg7_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -3141,15 +3141,15 @@
 define void @test_vsoxseg7_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -3159,15 +3159,15 @@
 define void @test_vsoxseg7_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -3180,15 +3180,15 @@
 define void @test_vsoxseg7_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -3198,15 +3198,15 @@
 define void @test_vsoxseg7_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -3219,16 +3219,16 @@
 define void @test_vsoxseg8_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -3238,16 +3238,16 @@
 define void @test_vsoxseg8_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -3260,16 +3260,16 @@
 define void @test_vsoxseg8_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -3279,16 +3279,16 @@
 define void @test_vsoxseg8_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -3301,16 +3301,16 @@
 define void @test_vsoxseg8_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -3320,16 +3320,16 @@
 define void @test_vsoxseg8_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -3433,11 +3433,11 @@
 define void @test_vsoxseg3_nxv8i16_nxv8i16( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -3447,11 +3447,11 @@
 define void @test_vsoxseg3_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -3464,11 +3464,11 @@
 define void @test_vsoxseg3_nxv8i16_nxv8i8( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -3478,11 +3478,11 @@
 define void @test_vsoxseg3_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -3495,11 +3495,11 @@
 define void @test_vsoxseg3_nxv8i16_nxv8i32( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v12
+; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -3509,11 +3509,11 @@
 define void @test_vsoxseg3_mask_nxv8i16_nxv8i32( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -3526,12 +3526,12 @@
 define void @test_vsoxseg4_nxv8i16_nxv8i16( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -3541,12 +3541,12 @@
 define void @test_vsoxseg4_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -3559,12 +3559,12 @@
 define void @test_vsoxseg4_nxv8i16_nxv8i8( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -3574,12 +3574,12 @@
 define void @test_vsoxseg4_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -3592,12 +3592,12 @@
 define void @test_vsoxseg4_nxv8i16_nxv8i32( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v12
+; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -3607,12 +3607,12 @@
 define void @test_vsoxseg4_mask_nxv8i16_nxv8i32( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -3714,11 +3714,11 @@
 define void @test_vsoxseg3_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -3728,11 +3728,11 @@
 define void @test_vsoxseg3_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -3745,11 +3745,11 @@
 define void @test_vsoxseg3_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -3759,11 +3759,11 @@
 define void @test_vsoxseg3_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -3807,12 +3807,12 @@
 define void @test_vsoxseg4_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -3822,12 +3822,12 @@
 define void @test_vsoxseg4_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -3840,12 +3840,12 @@
 define void @test_vsoxseg4_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -3855,12 +3855,12 @@
 define void @test_vsoxseg4_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -3906,13 +3906,13 @@
 define void @test_vsoxseg5_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -3922,13 +3922,13 @@
 define void @test_vsoxseg5_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -3941,13 +3941,13 @@
 define void @test_vsoxseg5_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -3957,13 +3957,13 @@
 define void @test_vsoxseg5_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -3976,13 +3976,13 @@
 define void @test_vsoxseg5_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v12
+; CHECK-NEXT: vsoxseg5ei32.v v16, (a0), v12
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -3992,13 +3992,13 @@
 define void @test_vsoxseg5_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4011,14 +4011,14 @@
 define void @test_vsoxseg6_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4028,14 +4028,14 @@
 define void @test_vsoxseg6_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4048,14 +4048,14 @@
 define void @test_vsoxseg6_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4065,14 +4065,14 @@
 define void @test_vsoxseg6_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4085,14 +4085,14 @@
 define void @test_vsoxseg6_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v12
+; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4102,14 +4102,14 @@
 define void @test_vsoxseg6_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4122,15 +4122,15 @@
 define void @test_vsoxseg7_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4140,15 +4140,15 @@
 define void @test_vsoxseg7_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4161,15 +4161,15 @@
 define void @test_vsoxseg7_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4179,15 +4179,15 @@ define void @test_vsoxseg7_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4200,15 +4200,15 @@ define void @test_vsoxseg7_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v12 +; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4218,15 +4218,15 @@ define void @test_vsoxseg7_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4239,16 +4239,16 @@ define void @test_vsoxseg8_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: 
vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4258,16 +4258,16 @@ define void @test_vsoxseg8_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4280,16 +4280,16 @@ define void @test_vsoxseg8_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4299,16 +4299,16 @@ define void @test_vsoxseg8_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4321,16 +4321,16 @@ define void @test_vsoxseg8_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; 
CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v12 +; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4340,16 +4340,16 @@ define void @test_vsoxseg8_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4546,11 +4546,11 @@ define void @test_vsoxseg3_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4560,11 +4560,11 @@ define void @test_vsoxseg3_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4577,11 +4577,11 @@ define void @test_vsoxseg3_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4591,11 +4591,11 @@ define 
void @test_vsoxseg3_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4608,11 +4608,11 @@ define void @test_vsoxseg3_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4622,11 +4622,11 @@ define void @test_vsoxseg3_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4639,12 +4639,12 @@ define void @test_vsoxseg4_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4654,12 +4654,12 @@ define void @test_vsoxseg4_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4672,12 +4672,12 @@ define void @test_vsoxseg4_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; 
CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4687,12 +4687,12 @@ define void @test_vsoxseg4_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4705,12 +4705,12 @@ define void @test_vsoxseg4_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4720,12 +4720,12 @@ define void @test_vsoxseg4_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4738,13 +4738,13 @@ define void @test_vsoxseg5_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4754,13 +4754,13 @@ define void @test_vsoxseg5_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v 
v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4773,13 +4773,13 @@ define void @test_vsoxseg5_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4789,13 +4789,13 @@ define void @test_vsoxseg5_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4808,13 +4808,13 @@ define void @test_vsoxseg5_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4824,13 +4824,13 @@ define void @test_vsoxseg5_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail 
call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4843,14 +4843,14 @@ define void @test_vsoxseg6_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4860,14 +4860,14 @@ define void @test_vsoxseg6_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4880,14 +4880,14 @@ define void @test_vsoxseg6_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4897,14 +4897,14 @@ define void @test_vsoxseg6_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4917,14 +4917,14 @@ define void @test_vsoxseg6_nxv4i8_nxv4i32( %val, i8* 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4934,14 +4934,14 @@ define void @test_vsoxseg6_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4954,15 +4954,15 @@ define void @test_vsoxseg7_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4972,15 +4972,15 @@ define void @test_vsoxseg7_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4993,15 +4993,15 @@ define void @test_vsoxseg7_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsoxseg7_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -5011,15 +5011,15 @@ define void @test_vsoxseg7_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -5032,15 +5032,15 @@ define void @test_vsoxseg7_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -5050,15 +5050,15 @@ define void @test_vsoxseg7_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -5071,16 +5071,16 @@ define void 
@test_vsoxseg8_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -5090,16 +5090,16 @@ define void @test_vsoxseg8_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -5112,16 +5112,16 @@ define void @test_vsoxseg8_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -5131,16 +5131,16 @@ define void @test_vsoxseg8_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, 
v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -5153,16 +5153,16 @@ define void @test_vsoxseg8_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -5172,16 +5172,16 @@ define void @test_vsoxseg8_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -5287,11 +5287,11 @@ define void @test_vsoxseg3_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5301,11 +5301,11 @@ define void @test_vsoxseg3_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) 
@@ -5318,11 +5318,11 @@ define void @test_vsoxseg3_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5332,11 +5332,11 @@ define void @test_vsoxseg3_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5349,11 +5349,11 @@ define void @test_vsoxseg3_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5363,11 +5363,11 @@ define void @test_vsoxseg3_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5380,12 +5380,12 @@ define void @test_vsoxseg4_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5395,12 +5395,12 @@ define void @test_vsoxseg4_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; 
CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5413,12 +5413,12 @@ define void @test_vsoxseg4_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5428,12 +5428,12 @@ define void @test_vsoxseg4_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5446,12 +5446,12 @@ define void @test_vsoxseg4_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5461,12 +5461,12 @@ define void @test_vsoxseg4_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5479,13 +5479,13 @@ define void @test_vsoxseg5_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, 
v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5495,13 +5495,13 @@ define void @test_vsoxseg5_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5514,13 +5514,13 @@ define void @test_vsoxseg5_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5530,13 +5530,13 @@ define void @test_vsoxseg5_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5549,13 +5549,13 @@ define void @test_vsoxseg5_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16( 
%val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5565,13 +5565,13 @@ define void @test_vsoxseg5_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5584,14 +5584,14 @@ define void @test_vsoxseg6_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5601,14 +5601,14 @@ define void @test_vsoxseg6_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5621,14 +5621,14 @@ define void @test_vsoxseg6_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5638,14 +5638,14 @@ define void @test_vsoxseg6_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i32: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5658,14 +5658,14 @@ define void @test_vsoxseg6_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5675,14 +5675,14 @@ define void @test_vsoxseg6_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5695,15 +5695,15 @@ define void @test_vsoxseg7_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5713,15 +5713,15 @@ define void @test_vsoxseg7_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v 
v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5734,15 +5734,15 @@ define void @test_vsoxseg7_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5752,15 +5752,15 @@ define void @test_vsoxseg7_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5773,15 +5773,15 @@ define void @test_vsoxseg7_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5791,15 +5791,15 @@ define void @test_vsoxseg7_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5812,16 +5812,16 @@ define void @test_vsoxseg8_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5831,16 +5831,16 @@ define void @test_vsoxseg8_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5853,16 +5853,16 @@ define void @test_vsoxseg8_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret 
entry: tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5872,16 +5872,16 @@ define void @test_vsoxseg8_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5894,16 +5894,16 @@ define void @test_vsoxseg8_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5913,16 +5913,16 @@ define void @test_vsoxseg8_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6088,11 +6088,11 @@ define void @test_vsoxseg3_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v0, 
(a0), v9 +; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6102,11 +6102,11 @@ define void @test_vsoxseg3_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6119,11 +6119,11 @@ define void @test_vsoxseg3_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6133,11 +6133,11 @@ define void @test_vsoxseg3_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6150,11 +6150,11 @@ define void @test_vsoxseg3_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6164,11 +6164,11 @@ define void @test_vsoxseg3_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6181,12 +6181,12 @@ define void @test_vsoxseg4_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6196,12 +6196,12 @@ define void @test_vsoxseg4_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6214,12 +6214,12 @@ define void @test_vsoxseg4_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6229,12 +6229,12 @@ define void @test_vsoxseg4_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6247,12 +6247,12 @@ define void @test_vsoxseg4_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6262,12 +6262,12 @@ define void @test_vsoxseg4_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; 
CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6280,13 +6280,13 @@ define void @test_vsoxseg5_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6296,13 +6296,13 @@ define void @test_vsoxseg5_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6315,13 +6315,13 @@ define void @test_vsoxseg5_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6331,13 +6331,13 @@ define void @test_vsoxseg5_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, 
i8* %base, %index, %mask, i32 %vl) @@ -6350,13 +6350,13 @@ define void @test_vsoxseg5_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6366,13 +6366,13 @@ define void @test_vsoxseg5_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6385,14 +6385,14 @@ define void @test_vsoxseg6_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6402,14 +6402,14 @@ define void @test_vsoxseg6_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6422,14 +6422,14 @@ define void @test_vsoxseg6_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v 
v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6439,14 +6439,14 @@ define void @test_vsoxseg6_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6459,14 +6459,14 @@ define void @test_vsoxseg6_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6476,14 +6476,14 @@ define void @test_vsoxseg6_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6496,15 +6496,15 @@ define void @test_vsoxseg7_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 
+; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6514,15 +6514,15 @@ define void @test_vsoxseg7_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6535,15 +6535,15 @@ define void @test_vsoxseg7_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6553,15 +6553,15 @@ define void @test_vsoxseg7_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6574,15 +6574,15 @@ define void @test_vsoxseg7_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: 
vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6592,15 +6592,15 @@ define void @test_vsoxseg7_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6613,16 +6613,16 @@ define void @test_vsoxseg8_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6632,16 +6632,16 @@ define void @test_vsoxseg8_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6654,16 +6654,16 @@ define void @test_vsoxseg8_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6673,16 +6673,16 @@ define void @test_vsoxseg8_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6695,16 +6695,16 @@ define void @test_vsoxseg8_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6714,16 +6714,16 @@ define void @test_vsoxseg8_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), 
v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6829,11 +6829,11 @@ define void @test_vsoxseg3_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -6843,11 +6843,11 @@ define void @test_vsoxseg3_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6860,11 +6860,11 @@ define void @test_vsoxseg3_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -6874,11 +6874,11 @@ define void @test_vsoxseg3_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6891,11 +6891,11 @@ define void @test_vsoxseg3_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -6905,11 +6905,11 @@ define void @test_vsoxseg3_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6922,12 +6922,12 @@ define void @test_vsoxseg4_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -6937,12 +6937,12 @@ define void @test_vsoxseg4_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6955,12 +6955,12 @@ define void @test_vsoxseg4_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -6970,12 +6970,12 @@ define void @test_vsoxseg4_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6988,12 +6988,12 @@ define void @test_vsoxseg4_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 
-; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7003,12 +7003,12 @@ define void @test_vsoxseg4_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7021,13 +7021,13 @@ define void @test_vsoxseg5_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7037,13 +7037,13 @@ define void @test_vsoxseg5_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7056,13 +7056,13 @@ define void @test_vsoxseg5_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7072,13 +7072,13 @@ define void 
@test_vsoxseg5_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7091,13 +7091,13 @@ define void @test_vsoxseg5_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7107,13 +7107,13 @@ define void @test_vsoxseg5_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7126,14 +7126,14 @@ define void @test_vsoxseg6_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7143,14 +7143,14 @@ define void @test_vsoxseg6_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 
+; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7163,14 +7163,14 @@ define void @test_vsoxseg6_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7180,14 +7180,14 @@ define void @test_vsoxseg6_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7200,14 +7200,14 @@ define void @test_vsoxseg6_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7217,14 +7217,14 @@ define void @test_vsoxseg6_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -7237,15 +7237,15 @@
define void @test_vsoxseg7_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -7255,15 +7255,15 @@
define void @test_vsoxseg7_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -7276,15 +7276,15 @@
define void @test_vsoxseg7_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -7294,15 +7294,15 @@
define void @test_vsoxseg7_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -7315,15 +7315,15 @@
define void @test_vsoxseg7_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -7333,15 +7333,15 @@
define void @test_vsoxseg7_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -7354,16 +7354,16 @@
define void @test_vsoxseg8_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -7373,16 +7373,16 @@
define void @test_vsoxseg8_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -7395,16 +7395,16 @@
define void @test_vsoxseg8_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -7414,16 +7414,16 @@
define void @test_vsoxseg8_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -7436,16 +7436,16 @@
define void @test_vsoxseg8_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val,
i16* %base, %index, i32 %vl)
@@ -7455,16 +7455,16 @@
define void @test_vsoxseg8_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -7570,11 +7570,11 @@
define void @test_vsoxseg3_nxv4i32_nxv4i16( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -7584,11 +7584,11 @@
define void @test_vsoxseg3_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -7601,11 +7601,11 @@
define void @test_vsoxseg3_nxv4i32_nxv4i8( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -7615,11 +7615,11 @@
define void @test_vsoxseg3_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -7632,11 +7632,11 @@
define void @test_vsoxseg3_nxv4i32_nxv4i32( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -7646,11 +7646,11 @@
define void @test_vsoxseg3_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -7663,12 +7663,12 @@
define void @test_vsoxseg4_nxv4i32_nxv4i16( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -7678,12 +7678,12 @@
define void @test_vsoxseg4_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -7696,12 +7696,12 @@
define void @test_vsoxseg4_nxv4i32_nxv4i8( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -7711,12 +7711,12 @@
define void @test_vsoxseg4_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -7729,12 +7729,12 @@
define void @test_vsoxseg4_nxv4i32_nxv4i32( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -7744,12 +7744,12 @@
define void @test_vsoxseg4_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -8039,11 +8039,11 @@
define void @test_vsoxseg3_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8053,11 +8053,11 @@
define void @test_vsoxseg3_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8070,11 +8070,11 @@
define void @test_vsoxseg3_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT:
vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8084,11 +8084,11 @@
define void @test_vsoxseg3_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8101,11 +8101,11 @@
define void @test_vsoxseg3_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8115,11 +8115,11 @@
define void @test_vsoxseg3_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8132,12 +8132,12 @@
define void @test_vsoxseg4_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8147,12 +8147,12 @@
define void @test_vsoxseg4_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8165,12 +8165,12 @@
define void @test_vsoxseg4_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8180,12 +8180,12 @@
define void @test_vsoxseg4_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8198,12 +8198,12 @@
define void @test_vsoxseg4_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8213,12 +8213,12 @@
define void @test_vsoxseg4_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8231,13 +8231,13 @@
define void @test_vsoxseg5_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8247,13 +8247,13 @@
define void @test_vsoxseg5_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8266,13 +8266,13 @@
define void @test_vsoxseg5_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8282,13 +8282,13 @@
define void @test_vsoxseg5_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8301,13 +8301,13 @@
define void @test_vsoxseg5_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8317,13 +8317,13 @@
define void
@test_vsoxseg5_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8336,14 +8336,14 @@
define void @test_vsoxseg6_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8353,14 +8353,14 @@
define void @test_vsoxseg6_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8373,14 +8373,14 @@
define void @test_vsoxseg6_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8390,14 +8390,14 @@
define void @test_vsoxseg6_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8410,14 +8410,14 @@
define void @test_vsoxseg6_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8427,14 +8427,14 @@
define void @test_vsoxseg6_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8447,15 +8447,15 @@
define void @test_vsoxseg7_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8465,15 +8465,15 @@
define void @test_vsoxseg7_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8486,15 +8486,15 @@
define void @test_vsoxseg7_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8504,15 +8504,15 @@
define void @test_vsoxseg7_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8525,15 +8525,15 @@
define void @test_vsoxseg7_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8543,15 +8543,15 @@
define void @test_vsoxseg7_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-;
CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8564,16 +8564,16 @@
define void @test_vsoxseg8_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8583,16 +8583,16 @@
define void @test_vsoxseg8_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8605,16 +8605,16 @@
define void @test_vsoxseg8_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8624,16 +8624,16 @@
define void @test_vsoxseg8_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8646,16 +8646,16 @@
define void @test_vsoxseg8_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8665,16 +8665,16 @@
define void @test_vsoxseg8_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8780,11 +8780,11 @@
define void @test_vsoxseg3_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, i32 %vl)
@@ -8794,11 +8794,11 @@
define void @test_vsoxseg3_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -8811,11 +8811,11 @@
define void @test_vsoxseg3_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, i32 %vl)
@@ -8825,11 +8825,11 @@
define void @test_vsoxseg3_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -8842,11 +8842,11 @@
define void @test_vsoxseg3_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, i32 %vl)
@@ -8856,11 +8856,11 @@
define void @test_vsoxseg3_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -8873,12 +8873,12 @@
define void @test_vsoxseg4_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -8888,12 +8888,12 @@
define void @test_vsoxseg4_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -8906,12 +8906,12 @@
define void @test_vsoxseg4_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -8921,12 +8921,12 @@
define void @test_vsoxseg4_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -8939,12 +8939,12 @@
define void @test_vsoxseg4_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -8954,12 +8954,12 @@
define void @test_vsoxseg4_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -8972,13 +8972,13 @@
define void @test_vsoxseg5_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -8988,13 +8988,13 @@
define void @test_vsoxseg5_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -9007,13 +9007,13 @@
define void @test_vsoxseg5_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -9023,13 +9023,13 @@
define void @test_vsoxseg5_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -9042,13 +9042,13 @@
define void @test_vsoxseg5_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -9058,13 +9058,13 @@
define void @test_vsoxseg5_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -9077,14 +9077,14 @@
define void @test_vsoxseg6_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -9094,14 +9094,14 @@
define void @test_vsoxseg6_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -9114,14 +9114,14 @@
define void @test_vsoxseg6_nxv2f32_nxv2i8( %val, float* %base,
%index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9131,14 +9131,14 @@ define void @test_vsoxseg6_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9151,14 +9151,14 @@ define void @test_vsoxseg6_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9168,14 +9168,14 @@ define void @test_vsoxseg6_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9188,15 +9188,15 @@ define void @test_vsoxseg7_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; 
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -9206,15 +9206,15 @@
define void @test_vsoxseg7_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -9227,15 +9227,15 @@
define void @test_vsoxseg7_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -9245,15 +9245,15 @@
define void @test_vsoxseg7_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -9266,15 +9266,15 @@
define void @test_vsoxseg7_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -9284,15 +9284,15 @@
define void @test_vsoxseg7_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -9305,16 +9305,16 @@
define void @test_vsoxseg8_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -9324,16 +9324,16 @@
define void @test_vsoxseg8_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -9346,16 +9346,16 @@
define void @test_vsoxseg8_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -9365,16 +9365,16 @@
define void @test_vsoxseg8_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -9387,16 +9387,16 @@
define void @test_vsoxseg8_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -9406,16 +9406,16 @@
define void @test_vsoxseg8_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -9521,11 +9521,11 @@
define void @test_vsoxseg3_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, i32 %vl)
@@ -9535,11 +9535,11 @@
define void @test_vsoxseg3_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -9552,11 +9552,11 @@
define void @test_vsoxseg3_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, i32 %vl)
@@ -9566,11 +9566,11 @@
define void @test_vsoxseg3_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -9583,11 +9583,11 @@
define void @test_vsoxseg3_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, i32 %vl)
@@ -9597,11 +9597,11 @@
define void @test_vsoxseg3_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -9614,12 +9614,12 @@
define void @test_vsoxseg4_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -9629,12 +9629,12 @@
define void @test_vsoxseg4_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -9647,12 +9647,12 @@
define void @test_vsoxseg4_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -9662,12 +9662,12 @@
define void @test_vsoxseg4_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -9680,12 +9680,12 @@
define void @test_vsoxseg4_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -9695,12 +9695,12 @@
define void @test_vsoxseg4_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -9713,13 +9713,13 @@
define void @test_vsoxseg5_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -9729,13 +9729,13 @@
define void @test_vsoxseg5_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -9748,13 +9748,13 @@
define void @test_vsoxseg5_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -9764,13 +9764,13 @@
define void @test_vsoxseg5_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -9783,13 +9783,13 @@
define void @test_vsoxseg5_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -9799,13 +9799,13 @@
define void @test_vsoxseg5_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -9818,14 +9818,14 @@
define void @test_vsoxseg6_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -9835,14 +9835,14 @@
define void @test_vsoxseg6_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -9855,14 +9855,14 @@
define void @test_vsoxseg6_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -9872,14 +9872,14 @@
define void @test_vsoxseg6_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -9892,14 +9892,14 @@
define void @test_vsoxseg6_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -9909,14 +9909,14 @@
define void @test_vsoxseg6_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -9929,15 +9929,15 @@
define void @test_vsoxseg7_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -9947,15 +9947,15 @@
define void @test_vsoxseg7_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -9968,15 +9968,15 @@
define void @test_vsoxseg7_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -9986,15 +9986,15 @@
define void @test_vsoxseg7_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -10007,15 +10007,15 @@
define void @test_vsoxseg7_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -10025,15 +10025,15 @@
define void @test_vsoxseg7_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -10046,16 +10046,16 @@
define void @test_vsoxseg8_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -10065,16 +10065,16 @@
define void @test_vsoxseg8_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -10087,16 +10087,16 @@
define void @test_vsoxseg8_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -10106,16 +10106,16 @@
define void @test_vsoxseg8_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -10128,16 +10128,16 @@
define void @test_vsoxseg8_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -10147,16 +10147,16 @@
define void @test_vsoxseg8_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -10262,11 +10262,11 @@
define void @test_vsoxseg3_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10276,11 +10276,11 @@
define void @test_vsoxseg3_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10293,11 +10293,11 @@
define void @test_vsoxseg3_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10307,11 +10307,11 @@
define void @test_vsoxseg3_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10324,11 +10324,11 @@
define void @test_vsoxseg3_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10338,11 +10338,11 @@
define void @test_vsoxseg3_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10355,12 +10355,12 @@
define void @test_vsoxseg4_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10370,12 +10370,12 @@
define void @test_vsoxseg4_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10388,12 +10388,12 @@
define void @test_vsoxseg4_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10403,12 +10403,12 @@
define void @test_vsoxseg4_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10421,12 +10421,12 @@
define void @test_vsoxseg4_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10436,12 +10436,12 @@
define void @test_vsoxseg4_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10454,13 +10454,13 @@
define void @test_vsoxseg5_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10470,13 +10470,13 @@
define void @test_vsoxseg5_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10489,13 +10489,13 @@
define void @test_vsoxseg5_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10505,13 +10505,13 @@
define void @test_vsoxseg5_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10524,13 +10524,13 @@
define void @test_vsoxseg5_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10540,13 +10540,13 @@
define void @test_vsoxseg5_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10559,14 +10559,14 @@
define void @test_vsoxseg6_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10576,14 +10576,14 @@
define void @test_vsoxseg6_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10596,14 +10596,14 @@
define void @test_vsoxseg6_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10613,14 +10613,14 @@
define void @test_vsoxseg6_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10633,14 +10633,14 @@
define void @test_vsoxseg6_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10650,14 +10650,14 @@
define void @test_vsoxseg6_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10670,15 +10670,15 @@
define void @test_vsoxseg7_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10688,15 +10688,15 @@
define void @test_vsoxseg7_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
   tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10709,15 +10709,15 @@
 define void @test_vsoxseg7_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10727,15 +10727,15 @@
 define void @test_vsoxseg7_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10748,15 +10748,15 @@
 define void @test_vsoxseg7_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10766,15 +10766,15 @@
 define void @test_vsoxseg7_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10787,16 +10787,16 @@
 define void @test_vsoxseg8_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10806,16 +10806,16 @@
 define void @test_vsoxseg8_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10828,16 +10828,16 @@
 define void @test_vsoxseg8_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10847,16 +10847,16 @@
 define void @test_vsoxseg8_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10869,16 +10869,16 @@
 define void @test_vsoxseg8_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10888,16 +10888,16 @@
 define void @test_vsoxseg8_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -11001,11 +11001,11 @@
 define void @test_vsoxseg3_nxv8f16_nxv8i16( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11015,11 +11015,11 @@
 define void @test_vsoxseg3_mask_nxv8f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11032,11 +11032,11 @@
 define void @test_vsoxseg3_nxv8f16_nxv8i8( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11046,11 +11046,11 @@
 define void @test_vsoxseg3_mask_nxv8f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11063,11 +11063,11 @@
 define void @test_vsoxseg3_nxv8f16_nxv8i32( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v12
+; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11077,11 +11077,11 @@
 define void @test_vsoxseg3_mask_nxv8f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11094,12 +11094,12 @@
 define void @test_vsoxseg4_nxv8f16_nxv8i16( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11109,12 +11109,12 @@
 define void @test_vsoxseg4_mask_nxv8f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11127,12 +11127,12 @@
 define void @test_vsoxseg4_nxv8f16_nxv8i8( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11142,12 +11142,12 @@
 define void @test_vsoxseg4_mask_nxv8f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11160,12 +11160,12 @@
 define void @test_vsoxseg4_nxv8f16_nxv8i32( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v12
+; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11175,12 +11175,12 @@
 define void @test_vsoxseg4_mask_nxv8f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11379,11 +11379,11 @@
 define void @test_vsoxseg3_nxv2f64_nxv2i32( %val, double* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, i32 %vl)
@@ -11393,11 +11393,11 @@
 define void @test_vsoxseg3_mask_nxv2f64_nxv2i32( %val, double* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -11410,11 +11410,11 @@
 define void @test_vsoxseg3_nxv2f64_nxv2i8( %val, double* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, i32 %vl)
@@ -11424,11 +11424,11 @@
 define void @test_vsoxseg3_mask_nxv2f64_nxv2i8( %val, double* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -11441,11 +11441,11 @@
 define void @test_vsoxseg3_nxv2f64_nxv2i16( %val, double* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, i32 %vl)
@@ -11455,11 +11455,11 @@
 define void @test_vsoxseg3_mask_nxv2f64_nxv2i16( %val, double* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -11472,12 +11472,12 @@
 define void @test_vsoxseg4_nxv2f64_nxv2i32( %val, double* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -11487,12 +11487,12 @@
 define void @test_vsoxseg4_mask_nxv2f64_nxv2i32( %val, double* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -11505,12 +11505,12 @@
 define void @test_vsoxseg4_nxv2f64_nxv2i8( %val, double* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -11520,12 +11520,12 @@
 define void @test_vsoxseg4_mask_nxv2f64_nxv2i8( %val, double* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -11538,12 +11538,12 @@
 define void @test_vsoxseg4_nxv2f64_nxv2i16( %val, double* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -11553,12 +11553,12 @@
 define void @test_vsoxseg4_mask_nxv2f64_nxv2i16( %val, double* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -11662,11 +11662,11 @@
 define void @test_vsoxseg3_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11676,11 +11676,11 @@
 define void @test_vsoxseg3_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11693,11 +11693,11 @@
 define void @test_vsoxseg3_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11707,11 +11707,11 @@
 define void @test_vsoxseg3_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11724,11 +11724,11 @@
 define void @test_vsoxseg3_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11738,11 +11738,11 @@
 define void @test_vsoxseg3_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11755,12 +11755,12 @@
 define void @test_vsoxseg4_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11770,12 +11770,12 @@
 define void @test_vsoxseg4_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11788,12 +11788,12 @@
 define void @test_vsoxseg4_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11803,12 +11803,12 @@
 define void @test_vsoxseg4_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11821,12 +11821,12 @@
 define void @test_vsoxseg4_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11836,12 +11836,12 @@
 define void @test_vsoxseg4_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11854,13 +11854,13 @@
 define void @test_vsoxseg5_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11870,13 +11870,13 @@
 define void @test_vsoxseg5_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11889,13 +11889,13 @@
 define void @test_vsoxseg5_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11905,13 +11905,13 @@
 define void @test_vsoxseg5_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11924,13 +11924,13 @@
 define void @test_vsoxseg5_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11940,13 +11940,13 @@
 define void @test_vsoxseg5_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11959,14 +11959,14 @@
 define void @test_vsoxseg6_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11976,14 +11976,14 @@
 define void @test_vsoxseg6_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11996,14 +11996,14 @@
 define void @test_vsoxseg6_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -12013,14 +12013,14 @@
 define void @test_vsoxseg6_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -12033,14 +12033,14 @@
 define void @test_vsoxseg6_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -12050,14 +12050,14 @@
 define void @test_vsoxseg6_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -12070,15 +12070,15 @@
 define void @test_vsoxseg7_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -12088,15 +12088,15 @@
 define void @test_vsoxseg7_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -12109,15 +12109,15 @@
 define void @test_vsoxseg7_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -12127,15 +12127,15 @@
 define void @test_vsoxseg7_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -12148,15 +12148,15 @@
 define void @test_vsoxseg7_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -12166,15 +12166,15 @@
 define void @test_vsoxseg7_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -12187,16 +12187,16 @@
 define void @test_vsoxseg8_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -12206,16 +12206,16 @@
 define void @test_vsoxseg8_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -12228,16 +12228,16 @@
 define void @test_vsoxseg8_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -12247,16 +12247,16 @@
 define void @test_vsoxseg8_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -12269,16 +12269,16 @@
 define void @test_vsoxseg8_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -12288,16 +12288,16 @@
 define void @test_vsoxseg8_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -12403,11 +12403,11 @@
 define void @test_vsoxseg3_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, i32 %vl)
@@ -12417,11 +12417,11 @@
 define void @test_vsoxseg3_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -12434,11 +12434,11 @@
 define void @test_vsoxseg3_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, i32 %vl)
@@ -12448,11 +12448,11 @@
 define void @test_vsoxseg3_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -12465,11 +12465,11 @@
 define void @test_vsoxseg3_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, i32 %vl)
@@ -12479,11 +12479,11 @@
 define void @test_vsoxseg3_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -12496,12 +12496,12 @@
 define void @test_vsoxseg4_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -12511,12 +12511,12 @@
 define void @test_vsoxseg4_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12529,12 +12529,12 @@ define void @test_vsoxseg4_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12544,12 +12544,12 @@ define void @test_vsoxseg4_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12562,12 +12562,12 @@ define void @test_vsoxseg4_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12577,12 +12577,12 @@ define void @test_vsoxseg4_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12595,13 +12595,13 @@ define void @test_vsoxseg5_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v 
v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12611,13 +12611,13 @@ define void @test_vsoxseg5_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12630,13 +12630,13 @@ define void @test_vsoxseg5_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12646,13 +12646,13 @@ define void @test_vsoxseg5_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12665,13 +12665,13 @@ define void @test_vsoxseg5_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; 
CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12681,13 +12681,13 @@ define void @test_vsoxseg5_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12700,14 +12700,14 @@ define void @test_vsoxseg6_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12717,14 +12717,14 @@ define void @test_vsoxseg6_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12737,14 +12737,14 @@ define void @test_vsoxseg6_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12754,14 +12754,14 @@ define void @test_vsoxseg6_mask_nxv2f16_nxv2i8( %val, 
half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12774,14 +12774,14 @@ define void @test_vsoxseg6_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12791,14 +12791,14 @@ define void @test_vsoxseg6_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12811,15 +12811,15 @@ define void @test_vsoxseg7_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12829,15 +12829,15 @@ define void @test_vsoxseg7_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: 
test_vsoxseg7_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12850,15 +12850,15 @@ define void @test_vsoxseg7_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12868,15 +12868,15 @@ define void @test_vsoxseg7_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12889,15 +12889,15 @@ define void @test_vsoxseg7_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12907,15 
+12907,15 @@ define void @test_vsoxseg7_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12928,16 +12928,16 @@ define void @test_vsoxseg8_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12947,16 +12947,16 @@ define void @test_vsoxseg8_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12969,16 +12969,16 @@ define void @test_vsoxseg8_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; 
CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12988,16 +12988,16 @@ define void @test_vsoxseg8_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -13010,16 +13010,16 @@ define void @test_vsoxseg8_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -13029,16 +13029,16 @@ define void @test_vsoxseg8_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -13144,11 +13144,11 @@ define void @test_vsoxseg3_nxv4f32_nxv4i16( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; 
CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, i32 %vl) @@ -13158,11 +13158,11 @@ define void @test_vsoxseg3_mask_nxv4f32_nxv4i16( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13175,11 +13175,11 @@ define void @test_vsoxseg3_nxv4f32_nxv4i8( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, i32 %vl) @@ -13189,11 +13189,11 @@ define void @test_vsoxseg3_mask_nxv4f32_nxv4i8( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13206,11 +13206,11 @@ define void @test_vsoxseg3_nxv4f32_nxv4i32( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, i32 %vl) @@ -13220,11 +13220,11 @@ define void @test_vsoxseg3_mask_nxv4f32_nxv4i32( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13237,12 +13237,12 @@ define void @test_vsoxseg4_nxv4f32_nxv4i16( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -13252,12 +13252,12 @@ define void @test_vsoxseg4_mask_nxv4f32_nxv4i16( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13270,12 +13270,12 @@ define void @test_vsoxseg4_nxv4f32_nxv4i8( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -13285,12 +13285,12 @@ define void @test_vsoxseg4_mask_nxv4f32_nxv4i8( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13303,12 +13303,12 @@ define void @test_vsoxseg4_nxv4f32_nxv4i32( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret 
entry:
tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -13318,12 +13318,12 @@ define void @test_vsoxseg4_mask_nxv4f32_nxv4i32( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll
@@ -221,11 +221,11 @@ define void @test_vsoxseg3_nxv4i32_nxv4i32( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -235,11 +235,11 @@ define void @test_vsoxseg3_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -252,11 +252,11 @@ define void @test_vsoxseg3_nxv4i32_nxv4i8( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -266,11 +266,11 @@ define void @test_vsoxseg3_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -283,11 +283,11 @@ define void @test_vsoxseg3_nxv4i32_nxv4i64( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v12
+; CHECK-NEXT: vsoxseg3ei64.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64( %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -297,11 +297,11 @@ define void @test_vsoxseg3_mask_nxv4i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -314,11 +314,11 @@ define void @test_vsoxseg3_nxv4i32_nxv4i16( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -328,11 +328,11 @@ define void @test_vsoxseg3_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -345,12 +345,12 @@ define void @test_vsoxseg4_nxv4i32_nxv4i32( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -360,12 +360,12 @@ define void @test_vsoxseg4_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -378,12 +378,12 @@ define void @test_vsoxseg4_nxv4i32_nxv4i8( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -393,12 +393,12 @@ define void @test_vsoxseg4_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -411,12 +411,12 @@ define void @test_vsoxseg4_nxv4i32_nxv4i64( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v12
+; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -426,12 +426,12 @@ define void @test_vsoxseg4_mask_nxv4i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei64.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -444,12 +444,12 @@ define void @test_vsoxseg4_nxv4i32_nxv4i16( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -459,12 +459,12 @@ define void @test_vsoxseg4_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -566,11 +566,11 @@ define void @test_vsoxseg3_nxv16i8_nxv16i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v12
+; CHECK-NEXT: vsoxseg3ei16.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -580,11 +580,11 @@ define void @test_vsoxseg3_mask_nxv16i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -597,11 +597,11 @@ define void @test_vsoxseg3_nxv16i8_nxv16i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -611,11 +611,11 @@ define void @test_vsoxseg3_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -659,12 +659,12 @@ define void @test_vsoxseg4_nxv16i8_nxv16i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v12
+; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -674,12 +674,12 @@ define void @test_vsoxseg4_mask_nxv16i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -692,12 +692,12 @@ define void @test_vsoxseg4_nxv16i8_nxv16i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -707,12 +707,12 @@ define void @test_vsoxseg4_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -882,11 +882,11 @@ define void @test_vsoxseg3_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i64( %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -896,11 +896,11 @@ define void @test_vsoxseg3_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -913,11 +913,11 @@ define void @test_vsoxseg3_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i32( %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -927,11 +927,11 @@ define void @test_vsoxseg3_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -944,11 +944,11 @@ define void @test_vsoxseg3_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i16( %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -958,11 +958,11 @@ define void @test_vsoxseg3_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -975,11 +975,11 @@ define void @test_vsoxseg3_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i8( %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -989,11 +989,11 @@ define void @test_vsoxseg3_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1006,12 +1006,12 @@ define void @test_vsoxseg4_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1021,12 +1021,12 @@ define void @test_vsoxseg4_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1039,12 +1039,12 @@ define void @test_vsoxseg4_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1054,12 +1054,12 @@ define void @test_vsoxseg4_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1072,12 +1072,12 @@ define void @test_vsoxseg4_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1087,12 +1087,12 @@ define void @test_vsoxseg4_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1105,12 +1105,12 @@ define void @test_vsoxseg4_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1120,12 +1120,12 @@ define void @test_vsoxseg4_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1138,13 +1138,13 @@ define void @test_vsoxseg5_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1154,13 +1154,13 @@ define void @test_vsoxseg5_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1173,13 +1173,13 @@ define void @test_vsoxseg5_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1189,13 +1189,13 @@ define void @test_vsoxseg5_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1208,13 +1208,13 @@ define void @test_vsoxseg5_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1224,13 +1224,13 @@ define void @test_vsoxseg5_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1243,13 +1243,13 @@ define void @test_vsoxseg5_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1259,13 +1259,13 @@ define void @test_vsoxseg5_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1278,14 +1278,14 @@ define void @test_vsoxseg6_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1295,14 +1295,14 @@ define void @test_vsoxseg6_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-;
CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1315,14 +1315,14 @@ define void @test_vsoxseg6_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1332,14 +1332,14 @@ define void @test_vsoxseg6_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1352,14 +1352,14 @@ define void @test_vsoxseg6_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1369,14 +1369,14 @@ define void @test_vsoxseg6_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; 
CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1389,14 +1389,14 @@ define void @test_vsoxseg6_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1406,14 +1406,14 @@ define void @test_vsoxseg6_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1426,15 +1426,15 @@ define void @test_vsoxseg7_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1444,15 +1444,15 @@ define void @test_vsoxseg7_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: 
vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1465,15 +1465,15 @@ define void @test_vsoxseg7_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1483,15 +1483,15 @@ define void @test_vsoxseg7_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1504,15 +1504,15 @@ define void @test_vsoxseg7_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1522,15 +1522,15 @@ define void @test_vsoxseg7_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; 
CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1543,15 +1543,15 @@ define void @test_vsoxseg7_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1561,15 +1561,15 @@ define void @test_vsoxseg7_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1582,16 +1582,16 @@ define void @test_vsoxseg8_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1601,16 +1601,16 @@ define void @test_vsoxseg8_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v 
v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1623,16 +1623,16 @@ define void @test_vsoxseg8_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1642,16 +1642,16 @@ define void @test_vsoxseg8_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1664,16 +1664,16 @@ define void @test_vsoxseg8_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, 
%val, %val, i64* %base, %index, i64 %vl) @@ -1683,16 +1683,16 @@ define void @test_vsoxseg8_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1705,16 +1705,16 @@ define void @test_vsoxseg8_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1724,16 +1724,16 @@ define void @test_vsoxseg8_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1870,11 +1870,11 @@ define void @test_vsoxseg3_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg3.nxv1i32.nxv1i64( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -1884,11 +1884,11 @@ define void @test_vsoxseg3_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -1901,11 +1901,11 @@ define void @test_vsoxseg3_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -1915,11 +1915,11 @@ define void @test_vsoxseg3_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -1932,11 +1932,11 @@ define void @test_vsoxseg3_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -1946,11 +1946,11 @@ define void @test_vsoxseg3_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -1963,11 +1963,11 @@ define void @test_vsoxseg3_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: 
vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -1977,11 +1977,11 @@ define void @test_vsoxseg3_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -1994,12 +1994,12 @@ define void @test_vsoxseg4_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2009,12 +2009,12 @@ define void @test_vsoxseg4_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2027,12 +2027,12 @@ define void @test_vsoxseg4_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2042,12 +2042,12 @@ define void @test_vsoxseg4_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 
+; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2060,12 +2060,12 @@ define void @test_vsoxseg4_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2075,12 +2075,12 @@ define void @test_vsoxseg4_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2093,12 +2093,12 @@ define void @test_vsoxseg4_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2108,12 +2108,12 @@ define void @test_vsoxseg4_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2126,13 +2126,13 @@ define void @test_vsoxseg5_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 
+; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2142,13 +2142,13 @@ define void @test_vsoxseg5_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2161,13 +2161,13 @@ define void @test_vsoxseg5_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2177,13 +2177,13 @@ define void @test_vsoxseg5_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2196,13 +2196,13 @@ define void @test_vsoxseg5_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2212,13 +2212,13 @@ define 
void @test_vsoxseg5_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2231,13 +2231,13 @@ define void @test_vsoxseg5_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2247,13 +2247,13 @@ define void @test_vsoxseg5_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2266,14 +2266,14 @@ define void @test_vsoxseg6_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2283,14 +2283,14 @@ define void @test_vsoxseg6_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 
+; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2303,14 +2303,14 @@ define void @test_vsoxseg6_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2320,14 +2320,14 @@ define void @test_vsoxseg6_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2340,14 +2340,14 @@ define void @test_vsoxseg6_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2357,14 +2357,14 @@ define void @test_vsoxseg6_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2377,14 +2377,14 @@ define void @test_vsoxseg6_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2394,14 +2394,14 @@ define void @test_vsoxseg6_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2414,15 +2414,15 @@ define void @test_vsoxseg7_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2432,15 +2432,15 @@ define void @test_vsoxseg7_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, 
mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2453,15 +2453,15 @@ define void @test_vsoxseg7_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2471,15 +2471,15 @@ define void @test_vsoxseg7_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2492,15 +2492,15 @@ define void @test_vsoxseg7_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2510,15 +2510,15 @@ define void @test_vsoxseg7_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: 
vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2531,15 +2531,15 @@ define void @test_vsoxseg7_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2549,15 +2549,15 @@ define void @test_vsoxseg7_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2570,16 +2570,16 @@ define void @test_vsoxseg8_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2589,16 +2589,16 @@ define void @test_vsoxseg8_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; 
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -2611,16 +2611,16 @@
 define void @test_vsoxseg8_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -2630,16 +2630,16 @@
 define void @test_vsoxseg8_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -2652,16 +2652,16 @@
 define void @test_vsoxseg8_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -2671,16 +2671,16 @@
 define void @test_vsoxseg8_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -2693,16 +2693,16 @@
 define void @test_vsoxseg8_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -2712,16 +2712,16 @@
 define void @test_vsoxseg8_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -2854,11 +2854,11 @@
 define void @test_vsoxseg3_nxv8i16_nxv8i16( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -2868,11 +2868,11 @@
 define void @test_vsoxseg3_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -2885,11 +2885,11 @@
 define void @test_vsoxseg3_nxv8i16_nxv8i8( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -2899,11 +2899,11 @@
 define void @test_vsoxseg3_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -2947,11 +2947,11 @@
 define void @test_vsoxseg3_nxv8i16_nxv8i32( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v12
+; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -2961,11 +2961,11 @@
 define void @test_vsoxseg3_mask_nxv8i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -2978,12 +2978,12 @@
 define void @test_vsoxseg4_nxv8i16_nxv8i16( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -2993,12 +2993,12 @@
 define void @test_vsoxseg4_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -3011,12 +3011,12 @@
 define void @test_vsoxseg4_nxv8i16_nxv8i8( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -3026,12 +3026,12 @@
 define void @test_vsoxseg4_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -3077,12 +3077,12 @@
 define void @test_vsoxseg4_nxv8i16_nxv8i32( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v12
+; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -3092,12 +3092,12 @@
 define void @test_vsoxseg4_mask_nxv8i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -3230,11 +3230,11 @@
 define void @test_vsoxseg3_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3244,11 +3244,11 @@
 define void @test_vsoxseg3_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3261,11 +3261,11 @@
 define void @test_vsoxseg3_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3275,11 +3275,11 @@
 define void @test_vsoxseg3_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3323,11 +3323,11 @@
 define void @test_vsoxseg3_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3337,11 +3337,11 @@
 define void @test_vsoxseg3_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3354,12 +3354,12 @@
 define void @test_vsoxseg4_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3369,12 +3369,12 @@
 define void @test_vsoxseg4_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3387,12 +3387,12 @@
 define void @test_vsoxseg4_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3402,12 +3402,12 @@
 define void @test_vsoxseg4_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3453,12 +3453,12 @@
 define void @test_vsoxseg4_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3468,12 +3468,12 @@
 define void @test_vsoxseg4_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3486,13 +3486,13 @@
 define void @test_vsoxseg5_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3502,13 +3502,13 @@
 define void @test_vsoxseg5_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3521,13 +3521,13 @@
 define void @test_vsoxseg5_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3537,13 +3537,13 @@
 define void @test_vsoxseg5_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3556,13 +3556,13 @@
 define void @test_vsoxseg5_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v12
+; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3572,13 +3572,13 @@
 define void @test_vsoxseg5_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3591,13 +3591,13 @@
 define void @test_vsoxseg5_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3607,13 +3607,13 @@
 define void @test_vsoxseg5_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3626,14 +3626,14 @@
 define void @test_vsoxseg6_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3643,14 +3643,14 @@
 define void @test_vsoxseg6_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3663,14 +3663,14 @@
 define void @test_vsoxseg6_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3680,14 +3680,14 @@
 define void @test_vsoxseg6_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3700,14 +3700,14 @@
 define void @test_vsoxseg6_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v12
+; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3717,14 +3717,14 @@
 define void @test_vsoxseg6_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3737,14 +3737,14 @@
 define void @test_vsoxseg6_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3754,14 +3754,14 @@
 define void @test_vsoxseg6_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3774,15 +3774,15 @@
 define void @test_vsoxseg7_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3792,15 +3792,15 @@
 define void @test_vsoxseg7_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3813,15 +3813,15 @@
 define void @test_vsoxseg7_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3831,15 +3831,15 @@
 define void @test_vsoxseg7_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3852,15 +3852,15 @@
 define void @test_vsoxseg7_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v12
+; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3870,15 +3870,15 @@
 define void @test_vsoxseg7_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3891,15 +3891,15 @@
 define void @test_vsoxseg7_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3909,15 +3909,15 @@
 define void @test_vsoxseg7_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3930,16 +3930,16 @@
 define void @test_vsoxseg8_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3949,16 +3949,16 @@
 define void @test_vsoxseg8_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3971,16 +3971,16 @@
 define void @test_vsoxseg8_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3990,16 +3990,16 @@
 define void @test_vsoxseg8_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -4012,16 +4012,16 @@
 define void @test_vsoxseg8_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v23, v16
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v12
+; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -4031,16 +4031,16 @@
 define void @test_vsoxseg8_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v23, v16
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -4053,16 +4053,16 @@
 define void @test_vsoxseg8_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -4072,16 +4072,16 @@
 define void @test_vsoxseg8_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -4218,11 +4218,11 @@
 define void @test_vsoxseg3_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i64( %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4232,11 +4232,11 @@
 define void @test_vsoxseg3_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4249,11 +4249,11 @@
 define void @test_vsoxseg3_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4263,11 +4263,11 @@
 define void @test_vsoxseg3_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4280,11 +4280,11 @@
 define void @test_vsoxseg3_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4294,11 +4294,11 @@
 define void @test_vsoxseg3_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4311,11 +4311,11 @@
 define void @test_vsoxseg3_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4325,11 +4325,11 @@
 define void @test_vsoxseg3_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4342,12 +4342,12 @@
 define void @test_vsoxseg4_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4357,12 +4357,12 @@
 define void @test_vsoxseg4_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4375,12 +4375,12 @@
 define void @test_vsoxseg4_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4390,12 +4390,12 @@
 define void @test_vsoxseg4_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4408,12 +4408,12 @@
 define void @test_vsoxseg4_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4423,12 +4423,12 @@
 define void @test_vsoxseg4_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4441,12 +4441,12 @@
 define void @test_vsoxseg4_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i8:
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4456,12 +4456,12 @@
 define void @test_vsoxseg4_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4474,13 +4474,13 @@
 define void @test_vsoxseg5_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4490,13 +4490,13 @@
 define void @test_vsoxseg5_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4509,13 +4509,13 @@
 define void @test_vsoxseg5_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4525,13 +4525,13 @@
 define void @test_vsoxseg5_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4544,13 +4544,13 @@
 define void @test_vsoxseg5_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4560,13 +4560,13 @@
 define void @test_vsoxseg5_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4579,13 +4579,13 @@
 define void @test_vsoxseg5_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4595,13 +4595,13 @@
 define void @test_vsoxseg5_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4614,14 +4614,14 @@
 define void @test_vsoxseg6_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4631,14 +4631,14 @@
 define void @test_vsoxseg6_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4651,14 +4651,14 @@
 define void @test_vsoxseg6_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4668,14 +4668,14 @@
 define void @test_vsoxseg6_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4688,14 +4688,14 @@
 define void @test_vsoxseg6_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4705,14 +4705,14 @@
 define void @test_vsoxseg6_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4725,14 +4725,14 @@
 define void @test_vsoxseg6_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4742,14 +4742,14 @@
 define void @test_vsoxseg6_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4762,15 +4762,15 @@
 define void @test_vsoxseg7_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4780,15 +4780,15 @@
 define void @test_vsoxseg7_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4801,15 +4801,15 @@
 define void @test_vsoxseg7_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4819,15 +4819,15 @@
 define void @test_vsoxseg7_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4840,15 +4840,15 @@
 define void @test_vsoxseg7_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4858,15 +4858,15 @@
 define void @test_vsoxseg7_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4879,15 +4879,15 @@
 define void @test_vsoxseg7_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4897,15 +4897,15 @@
 define void @test_vsoxseg7_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4918,16 +4918,16 @@
 define void @test_vsoxseg8_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4937,16 +4937,16 @@
 define void @test_vsoxseg8_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4959,16 +4959,16 @@
 define void @test_vsoxseg8_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4978,16 +4978,16 @@
 define void @test_vsoxseg8_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -5000,16 +5000,16 @@
 define void @test_vsoxseg8_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -5019,16 +5019,16 @@
 define void @test_vsoxseg8_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -5041,16 +5041,16 @@
 define void @test_vsoxseg8_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -5060,16 +5060,16 @@
 define void @test_vsoxseg8_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -5204,11 +5204,11 @@
 define void @test_vsoxseg3_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5218,11 +5218,11 @@
 define void @test_vsoxseg3_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5235,11 +5235,11 @@
 define void @test_vsoxseg3_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5249,11 +5249,11 @@
 define void @test_vsoxseg3_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5266,11 +5266,11 @@
 define void @test_vsoxseg3_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5280,11 +5280,11 @@
 define void @test_vsoxseg3_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5297,11 +5297,11 @@
 define void @test_vsoxseg3_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64( %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5311,11 +5311,11 @@
 define void @test_vsoxseg3_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5328,12 +5328,12 @@
 define void @test_vsoxseg4_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5343,12 +5343,12 @@
 define void @test_vsoxseg4_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5361,12 +5361,12 @@
 define void @test_vsoxseg4_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5376,12 +5376,12 @@
 define void @test_vsoxseg4_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5394,12 +5394,12 @@
 define void @test_vsoxseg4_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5409,12 +5409,12 @@
 define void @test_vsoxseg4_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5427,12 +5427,12 @@
 define void @test_vsoxseg4_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5442,12 +5442,12 @@
 define void @test_vsoxseg4_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5460,13 +5460,13 @@
 define void @test_vsoxseg5_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5476,13 +5476,13 @@
 define void @test_vsoxseg5_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5495,13 +5495,13 @@
 define void @test_vsoxseg5_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5511,13 +5511,13 @@
 define void @test_vsoxseg5_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5530,13 +5530,13 @@
 define void @test_vsoxseg5_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5546,13 +5546,13 @@
 define void @test_vsoxseg5_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5565,13 +5565,13 @@
 define void @test_vsoxseg5_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5581,13 +5581,13 @@
 define void @test_vsoxseg5_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5600,14 +5600,14 @@
 define void @test_vsoxseg6_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5617,14 +5617,14 @@
 define void @test_vsoxseg6_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5637,14 +5637,14 @@
 define void @test_vsoxseg6_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5654,14 +5654,14 @@
 define void @test_vsoxseg6_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5674,14 +5674,14 @@
 define void @test_vsoxseg6_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5691,14 +5691,14 @@
 define void @test_vsoxseg6_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5711,14 +5711,14 @@
 define void @test_vsoxseg6_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5728,14 +5728,14 @@
 define void @test_vsoxseg6_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5748,15 +5748,15 @@
 define void @test_vsoxseg7_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5766,15 +5766,15 @@
 define void @test_vsoxseg7_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5787,15 +5787,15 @@
 define void @test_vsoxseg7_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5805,15 +5805,15 @@
 define void @test_vsoxseg7_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5826,15 +5826,15 @@
 define void @test_vsoxseg7_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5844,15 +5844,15 @@
 define void @test_vsoxseg7_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5865,15 +5865,15 @@
 define void @test_vsoxseg7_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5883,15 +5883,15 @@
 define void @test_vsoxseg7_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v10, v0.t
v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5904,16 +5904,16 @@ define void @test_vsoxseg8_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5923,16 +5923,16 @@ define void @test_vsoxseg8_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5945,16 +5945,16 @@ define void @test_vsoxseg8_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5964,16 +5964,16 @@ define void @test_vsoxseg8_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v 
v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5986,16 +5986,16 @@ define void @test_vsoxseg8_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -6005,16 +6005,16 @@ define void @test_vsoxseg8_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -6027,16 +6027,16 @@ define void @test_vsoxseg8_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -6046,16 
+6046,16 @@ define void @test_vsoxseg8_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -6186,11 +6186,11 @@ define void @test_vsoxseg3_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6200,11 +6200,11 @@ define void @test_vsoxseg3_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6217,11 +6217,11 @@ define void @test_vsoxseg3_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6231,11 +6231,11 @@ define void @test_vsoxseg3_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6310,12 
+6310,12 @@ define void @test_vsoxseg4_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6325,12 +6325,12 @@ define void @test_vsoxseg4_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6343,12 +6343,12 @@ define void @test_vsoxseg4_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6358,12 +6358,12 @@ define void @test_vsoxseg4_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6442,13 +6442,13 @@ define void @test_vsoxseg5_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 
%vl) @@ -6458,13 +6458,13 @@ define void @test_vsoxseg5_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6477,13 +6477,13 @@ define void @test_vsoxseg5_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6493,13 +6493,13 @@ define void @test_vsoxseg5_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6547,13 +6547,13 @@ define void @test_vsoxseg5_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v12 +; CHECK-NEXT: vsoxseg5ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6563,13 +6563,13 @@ define void @test_vsoxseg5_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v 
v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: vsoxseg5ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6582,14 +6582,14 @@ define void @test_vsoxseg6_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6599,14 +6599,14 @@ define void @test_vsoxseg6_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6619,14 +6619,14 @@ define void @test_vsoxseg6_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6636,14 +6636,14 @@ define void @test_vsoxseg6_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; 
CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6693,14 +6693,14 @@ define void @test_vsoxseg6_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v12 +; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6710,14 +6710,14 @@ define void @test_vsoxseg6_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6730,15 +6730,15 @@ define void @test_vsoxseg7_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6748,15 +6748,15 @@ define void @test_vsoxseg7_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6769,15 +6769,15 @@ define void @test_vsoxseg7_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6787,15 +6787,15 @@ define void @test_vsoxseg7_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6847,15 +6847,15 @@ define void @test_vsoxseg7_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v12 +; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6865,15 +6865,15 @@ define void @test_vsoxseg7_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v12, v0.t +; 
CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6886,16 +6886,16 @@ define void @test_vsoxseg8_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6905,16 +6905,16 @@ define void @test_vsoxseg8_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6927,16 +6927,16 @@ define void @test_vsoxseg8_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6946,16 +6946,16 @@ define void @test_vsoxseg8_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -7009,16 +7009,16 @@ define void @test_vsoxseg8_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v12 +; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -7028,16 +7028,16 @@ define void @test_vsoxseg8_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -7294,11 +7294,11 @@ define void @test_vsoxseg3_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7308,11 +7308,11 @@ define void @test_vsoxseg3_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vsetvli zero, 
a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7325,11 +7325,11 @@ define void @test_vsoxseg3_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7339,11 +7339,11 @@ define void @test_vsoxseg3_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7387,11 +7387,11 @@ define void @test_vsoxseg3_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7401,11 +7401,11 @@ define void @test_vsoxseg3_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7418,12 +7418,12 @@ define void @test_vsoxseg4_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7433,12 +7433,12 @@ define void 
@test_vsoxseg4_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7451,12 +7451,12 @@ define void @test_vsoxseg4_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7466,12 +7466,12 @@ define void @test_vsoxseg4_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7517,12 +7517,12 @@ define void @test_vsoxseg4_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7532,12 +7532,12 @@ define void @test_vsoxseg4_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7550,13 +7550,13 
@@ define void @test_vsoxseg5_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7566,13 +7566,13 @@ define void @test_vsoxseg5_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7585,13 +7585,13 @@ define void @test_vsoxseg5_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7601,13 +7601,13 @@ define void @test_vsoxseg5_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7620,13 +7620,13 @@ define void @test_vsoxseg5_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; 
CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v12 +; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7636,13 +7636,13 @@ define void @test_vsoxseg5_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7655,13 +7655,13 @@ define void @test_vsoxseg5_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7671,13 +7671,13 @@ define void @test_vsoxseg5_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7690,14 +7690,14 @@ define void @test_vsoxseg6_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7707,14 +7707,14 @@ define void 
@test_vsoxseg6_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7727,14 +7727,14 @@ define void @test_vsoxseg6_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7744,14 +7744,14 @@ define void @test_vsoxseg6_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7764,14 +7764,14 @@ define void @test_vsoxseg6_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v12 +; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7781,14 +7781,14 @@ define void @test_vsoxseg6_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7801,14 +7801,14 @@ define void @test_vsoxseg6_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7818,14 +7818,14 @@ define void @test_vsoxseg6_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7838,15 +7838,15 @@ define void @test_vsoxseg7_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7856,15 +7856,15 @@ define void @test_vsoxseg7_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; 
CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -7877,15 +7877,15 @@
define void @test_vsoxseg7_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -7895,15 +7895,15 @@
define void @test_vsoxseg7_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -7916,15 +7916,15 @@
define void @test_vsoxseg7_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v12
+; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -7934,15 +7934,15 @@
define void @test_vsoxseg7_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -7955,15 +7955,15 @@
define void @test_vsoxseg7_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -7973,15 +7973,15 @@
define void @test_vsoxseg7_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -7994,16 +7994,16 @@
define void @test_vsoxseg8_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -8013,16 +8013,16 @@
define void @test_vsoxseg8_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -8035,16 +8035,16 @@
define void @test_vsoxseg8_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -8054,16 +8054,16 @@
define void @test_vsoxseg8_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -8076,16 +8076,16 @@
define void @test_vsoxseg8_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v23, v16
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v12
+; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -8095,16 +8095,16 @@
define void @test_vsoxseg8_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v23, v16
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -8117,16 +8117,16 @@
define void @test_vsoxseg8_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -8136,16 +8136,16 @@
define void @test_vsoxseg8_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -8282,11 +8282,11 @@
define void @test_vsoxseg3_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i64( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -8296,11 +8296,11 @@
define void @test_vsoxseg3_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -8313,11 +8313,11 @@
define void @test_vsoxseg3_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -8327,11 +8327,11 @@
define void @test_vsoxseg3_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -8344,11 +8344,11 @@
define void @test_vsoxseg3_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -8358,11 +8358,11 @@
define void @test_vsoxseg3_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -8375,11 +8375,11 @@
define void @test_vsoxseg3_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -8389,11 +8389,11 @@
define void @test_vsoxseg3_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -8406,12 +8406,12 @@
define void @test_vsoxseg4_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -8421,12 +8421,12 @@
define void @test_vsoxseg4_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -8439,12 +8439,12 @@
define void @test_vsoxseg4_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -8454,12 +8454,12 @@
define void @test_vsoxseg4_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -8472,12 +8472,12 @@
define void @test_vsoxseg4_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -8487,12 +8487,12 @@
define void @test_vsoxseg4_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -8505,12 +8505,12 @@
define void @test_vsoxseg4_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -8520,12 +8520,12 @@
define void @test_vsoxseg4_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -8538,13 +8538,13 @@
define void @test_vsoxseg5_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -8554,13 +8554,13 @@
define void @test_vsoxseg5_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -8573,13 +8573,13 @@
define void @test_vsoxseg5_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -8589,13 +8589,13 @@
define void @test_vsoxseg5_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -8608,13 +8608,13 @@
define void @test_vsoxseg5_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -8624,13 +8624,13 @@
define void @test_vsoxseg5_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -8643,13 +8643,13 @@
define void @test_vsoxseg5_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -8659,13 +8659,13 @@
define void @test_vsoxseg5_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -8678,14 +8678,14 @@
define void @test_vsoxseg6_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -8695,14 +8695,14 @@
define void @test_vsoxseg6_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -8715,14 +8715,14 @@
define void @test_vsoxseg6_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -8732,14 +8732,14 @@
define void @test_vsoxseg6_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -8752,14 +8752,14 @@
define void @test_vsoxseg6_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -8769,14 +8769,14 @@
define void @test_vsoxseg6_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -8789,14 +8789,14 @@
define void @test_vsoxseg6_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -8806,14 +8806,14 @@
define void @test_vsoxseg6_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -8826,15 +8826,15 @@
define void @test_vsoxseg7_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -8844,15 +8844,15 @@
define void @test_vsoxseg7_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -8865,15 +8865,15 @@
define void @test_vsoxseg7_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -8883,15 +8883,15 @@
define void @test_vsoxseg7_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -8904,15 +8904,15 @@
define void @test_vsoxseg7_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -8922,15 +8922,15 @@
define void @test_vsoxseg7_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -8943,15 +8943,15 @@
define void @test_vsoxseg7_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -8961,15 +8961,15 @@
define void @test_vsoxseg7_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -8982,16 +8982,16 @@
define void @test_vsoxseg8_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9001,16 +9001,16 @@
define void @test_vsoxseg8_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9023,16 +9023,16 @@
define void @test_vsoxseg8_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9042,16 +9042,16 @@
define void @test_vsoxseg8_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9064,16 +9064,16 @@
define void @test_vsoxseg8_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9083,16 +9083,16 @@
define void @test_vsoxseg8_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9105,16 +9105,16 @@
define void @test_vsoxseg8_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9124,16 +9124,16 @@
define void @test_vsoxseg8_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9268,11 +9268,11 @@
define void @test_vsoxseg3_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9282,11 +9282,11 @@
define void @test_vsoxseg3_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9299,11 +9299,11 @@
define void @test_vsoxseg3_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9313,11 +9313,11 @@
define void @test_vsoxseg3_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9330,11 +9330,11 @@
define void @test_vsoxseg3_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9344,11 +9344,11 @@
define void @test_vsoxseg3_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9361,11 +9361,11 @@
define void @test_vsoxseg3_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9375,11 +9375,11 @@
define void @test_vsoxseg3_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9392,12 +9392,12 @@
define void @test_vsoxseg4_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9407,12 +9407,12 @@
define void @test_vsoxseg4_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9425,12 +9425,12 @@
define void @test_vsoxseg4_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9440,12 +9440,12 @@
define void @test_vsoxseg4_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9458,12 +9458,12 @@
define void @test_vsoxseg4_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9473,12 +9473,12 @@
define void @test_vsoxseg4_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9491,12 +9491,12 @@
define void @test_vsoxseg4_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9506,12 +9506,12 @@
define void @test_vsoxseg4_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9524,13 +9524,13 @@
define void @test_vsoxseg5_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9540,13 +9540,13 @@
define void @test_vsoxseg5_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9559,13 +9559,13 @@
define void @test_vsoxseg5_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9575,13 +9575,13 @@
define void @test_vsoxseg5_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9594,13 +9594,13 @@
define void @test_vsoxseg5_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9610,13 +9610,13 @@
define void @test_vsoxseg5_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9629,13 +9629,13 @@
define void @test_vsoxseg5_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9645,13 +9645,13 @@
define void @test_vsoxseg5_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9664,14 +9664,14 @@
define void @test_vsoxseg6_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9681,14 +9681,14 @@
define void @test_vsoxseg6_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+;
CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9701,14 +9701,14 @@ define void @test_vsoxseg6_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9718,14 +9718,14 @@ define void @test_vsoxseg6_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9738,14 +9738,14 @@ define void @test_vsoxseg6_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9755,14 +9755,14 @@ define void @test_vsoxseg6_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei16.v v10, 
(a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9775,14 +9775,14 @@ define void @test_vsoxseg6_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9792,14 +9792,14 @@ define void @test_vsoxseg6_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9812,15 +9812,15 @@ define void @test_vsoxseg7_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9830,15 +9830,15 @@ define void @test_vsoxseg7_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call 
void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9851,15 +9851,15 @@ define void @test_vsoxseg7_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9869,15 +9869,15 @@ define void @test_vsoxseg7_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9890,15 +9890,15 @@ define void @test_vsoxseg7_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9908,15 +9908,15 @@ define void @test_vsoxseg7_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; 
CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9929,15 +9929,15 @@ define void @test_vsoxseg7_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9947,15 +9947,15 @@ define void @test_vsoxseg7_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9968,16 +9968,16 @@ define void @test_vsoxseg8_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9987,16 +9987,16 @@ define void @test_vsoxseg8_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; 
CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -10009,16 +10009,16 @@ define void @test_vsoxseg8_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -10028,16 +10028,16 @@ define void @test_vsoxseg8_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -10050,16 +10050,16 @@ define void @test_vsoxseg8_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -10069,16 +10069,16 @@ define void @test_vsoxseg8_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -10091,16 +10091,16 @@ define void @test_vsoxseg8_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -10110,16 +10110,16 @@ define void @test_vsoxseg8_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -10436,11 +10436,11 @@ define void @test_vsoxseg3_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10450,11 +10450,11 @@ define void @test_vsoxseg3_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg3_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10467,11 +10467,11 @@ define void @test_vsoxseg3_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10481,11 +10481,11 @@ define void @test_vsoxseg3_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10498,11 +10498,11 @@ define void @test_vsoxseg3_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10512,11 +10512,11 @@ define void @test_vsoxseg3_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10529,11 +10529,11 @@ define void @test_vsoxseg3_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei64.v 
v0, (a0), v10 +; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10543,11 +10543,11 @@ define void @test_vsoxseg3_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10560,12 +10560,12 @@ define void @test_vsoxseg4_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10575,12 +10575,12 @@ define void @test_vsoxseg4_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10593,12 +10593,12 @@ define void @test_vsoxseg4_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10608,12 +10608,12 @@ define void @test_vsoxseg4_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, 
v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10626,12 +10626,12 @@ define void @test_vsoxseg4_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10641,12 +10641,12 @@ define void @test_vsoxseg4_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10659,12 +10659,12 @@ define void @test_vsoxseg4_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10674,12 +10674,12 @@ define void @test_vsoxseg4_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10692,13 +10692,13 @@ define void @test_vsoxseg5_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; 
CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10708,13 +10708,13 @@ define void @test_vsoxseg5_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10727,13 +10727,13 @@ define void @test_vsoxseg5_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10743,13 +10743,13 @@ define void @test_vsoxseg5_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10762,13 +10762,13 @@ define void @test_vsoxseg5_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10778,13 +10778,13 @@ define void @test_vsoxseg5_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10797,13 +10797,13 @@ define void @test_vsoxseg5_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10813,13 +10813,13 @@ define void @test_vsoxseg5_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10832,14 +10832,14 @@ define void @test_vsoxseg6_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10849,14 +10849,14 @@ define void @test_vsoxseg6_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, 
v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10869,14 +10869,14 @@ define void @test_vsoxseg6_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10886,14 +10886,14 @@ define void @test_vsoxseg6_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10906,14 +10906,14 @@ define void @test_vsoxseg6_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10923,14 +10923,14 @@ define void @test_vsoxseg6_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; 
CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10943,14 +10943,14 @@ define void @test_vsoxseg6_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10960,14 +10960,14 @@ define void @test_vsoxseg6_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10980,15 +10980,15 @@ define void @test_vsoxseg7_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10998,15 +10998,15 @@ define void @test_vsoxseg7_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret 
entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11019,15 +11019,15 @@ define void @test_vsoxseg7_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11037,15 +11037,15 @@ define void @test_vsoxseg7_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11058,15 +11058,15 @@ define void @test_vsoxseg7_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11076,15 +11076,15 @@ define void @test_vsoxseg7_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; 
CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11097,15 +11097,15 @@ define void @test_vsoxseg7_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11115,15 +11115,15 @@ define void @test_vsoxseg7_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11136,16 +11136,16 @@ define void @test_vsoxseg8_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11155,16 +11155,16 @@ define void @test_vsoxseg8_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v 
v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11177,16 +11177,16 @@ define void @test_vsoxseg8_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11196,16 +11196,16 @@ define void @test_vsoxseg8_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11218,16 +11218,16 @@ define void @test_vsoxseg8_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11237,16 +11237,16 @@ define void @test_vsoxseg8_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, 
i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11259,16 +11259,16 @@ define void @test_vsoxseg8_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11278,16 +11278,16 @@ define void @test_vsoxseg8_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11424,11 +11424,11 @@ define void @test_vsoxseg3_nxv2i64_nxv2i32( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i32( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11438,11 +11438,11 @@ define void 
@test_vsoxseg3_mask_nxv2i64_nxv2i32( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11455,11 +11455,11 @@ define void @test_vsoxseg3_nxv2i64_nxv2i8( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i8( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11469,11 +11469,11 @@ define void @test_vsoxseg3_mask_nxv2i64_nxv2i8( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11486,11 +11486,11 @@ define void @test_vsoxseg3_nxv2i64_nxv2i16( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i16( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11500,11 +11500,11 @@ define void @test_vsoxseg3_mask_nxv2i64_nxv2i16( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11517,11 +11517,11 @@ define void @test_vsoxseg3_nxv2i64_nxv2i64( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; 
CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i64( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11531,11 +11531,11 @@ define void @test_vsoxseg3_mask_nxv2i64_nxv2i64( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei64.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11548,12 +11548,12 @@ define void @test_vsoxseg4_nxv2i64_nxv2i32( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i32( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11563,12 +11563,12 @@ define void @test_vsoxseg4_mask_nxv2i64_nxv2i32( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11581,12 +11581,12 @@ define void @test_vsoxseg4_nxv2i64_nxv2i8( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11596,12 +11596,12 @@ define void @test_vsoxseg4_mask_nxv2i64_nxv2i8( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, 
e64, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11614,12 +11614,12 @@ define void @test_vsoxseg4_nxv2i64_nxv2i16( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i16( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11629,12 +11629,12 @@ define void @test_vsoxseg4_mask_nxv2i64_nxv2i16( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11647,12 +11647,12 @@ define void @test_vsoxseg4_nxv2i64_nxv2i64( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i64( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11662,12 +11662,12 @@ define void @test_vsoxseg4_mask_nxv2i64_nxv2i64( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei64.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -12019,11 +12019,11 @@ define void @test_vsoxseg3_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: 
vsoxseg3ei64.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i64( %val, %val, %val, double* %base, %index, i64 %vl) @@ -12033,11 +12033,11 @@ define void @test_vsoxseg3_mask_nxv1f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12050,11 +12050,11 @@ define void @test_vsoxseg3_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, i64 %vl) @@ -12064,11 +12064,11 @@ define void @test_vsoxseg3_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12081,11 +12081,11 @@ define void @test_vsoxseg3_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, i64 %vl) @@ -12095,11 +12095,11 @@ define void @test_vsoxseg3_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12112,11 +12112,11 @@ define void @test_vsoxseg3_nxv1f64_nxv1i8( %val, 
double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, i64 %vl) @@ -12126,11 +12126,11 @@ define void @test_vsoxseg3_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12143,12 +12143,12 @@ define void @test_vsoxseg4_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12158,12 +12158,12 @@ define void @test_vsoxseg4_mask_nxv1f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12176,12 +12176,12 @@ define void @test_vsoxseg4_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12191,12 +12191,12 @@ define void @test_vsoxseg4_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12209,12 +12209,12 @@ define void @test_vsoxseg4_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12224,12 +12224,12 @@ define void @test_vsoxseg4_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12242,12 +12242,12 @@ define void @test_vsoxseg4_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12257,12 +12257,12 @@ define void @test_vsoxseg4_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12275,13 +12275,13 @@ define void @test_vsoxseg5_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg5_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12291,13 +12291,13 @@ define void @test_vsoxseg5_mask_nxv1f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12310,13 +12310,13 @@ define void @test_vsoxseg5_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12326,13 +12326,13 @@ define void @test_vsoxseg5_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12345,13 +12345,13 @@ define void @test_vsoxseg5_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, 
m1, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12361,13 +12361,13 @@ define void @test_vsoxseg5_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12380,13 +12380,13 @@ define void @test_vsoxseg5_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12396,13 +12396,13 @@ define void @test_vsoxseg5_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12415,14 +12415,14 @@ define void @test_vsoxseg6_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12432,14 +12432,14 @@ define void @test_vsoxseg6_mask_nxv1f64_nxv1i64( %val, double* %base, %index, 
%mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12452,14 +12452,14 @@ define void @test_vsoxseg6_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12469,14 +12469,14 @@ define void @test_vsoxseg6_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12489,14 +12489,14 @@ define void @test_vsoxseg6_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12506,14 +12506,14 @@ define void @test_vsoxseg6_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; 
CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12526,14 +12526,14 @@ define void @test_vsoxseg6_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12543,14 +12543,14 @@ define void @test_vsoxseg6_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12563,15 +12563,15 @@ define void @test_vsoxseg7_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12581,15 +12581,15 @@ define void @test_vsoxseg7_mask_nxv1f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: 
vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12602,15 +12602,15 @@ define void @test_vsoxseg7_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12620,15 +12620,15 @@ define void @test_vsoxseg7_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12641,15 +12641,15 @@ define void @test_vsoxseg7_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12659,15 +12659,15 @@ define void @test_vsoxseg7_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg7_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12680,15 +12680,15 @@ define void @test_vsoxseg7_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12698,15 +12698,15 @@ define void @test_vsoxseg7_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12719,16 +12719,16 @@ define void @test_vsoxseg8_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i64( %val, %val, %val, 
%val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12738,16 +12738,16 @@ define void @test_vsoxseg8_mask_nxv1f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12760,16 +12760,16 @@ define void @test_vsoxseg8_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12779,16 +12779,16 @@ define void @test_vsoxseg8_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12801,16 +12801,16 @@ define void @test_vsoxseg8_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; 
CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12820,16 +12820,16 @@ define void @test_vsoxseg8_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12842,16 +12842,16 @@ define void @test_vsoxseg8_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12861,16 +12861,16 @@ define void @test_vsoxseg8_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -13005,11 +13005,11 @@ define void @test_vsoxseg3_nxv2f32_nxv2i32( %val, float* 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, i64 %vl) @@ -13019,11 +13019,11 @@ define void @test_vsoxseg3_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13036,11 +13036,11 @@ define void @test_vsoxseg3_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, i64 %vl) @@ -13050,11 +13050,11 @@ define void @test_vsoxseg3_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13067,11 +13067,11 @@ define void @test_vsoxseg3_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, i64 %vl) @@ -13081,11 +13081,11 @@ define void @test_vsoxseg3_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, 
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13098,11 +13098,11 @@
define void @test_vsoxseg3_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i64( %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13112,11 +13112,11 @@
define void @test_vsoxseg3_mask_nxv2f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13129,12 +13129,12 @@
define void @test_vsoxseg4_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13144,12 +13144,12 @@
define void @test_vsoxseg4_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13162,12 +13162,12 @@
define void @test_vsoxseg4_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13177,12 +13177,12 @@
define void @test_vsoxseg4_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13195,12 +13195,12 @@
define void @test_vsoxseg4_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13210,12 +13210,12 @@
define void @test_vsoxseg4_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13228,12 +13228,12 @@
define void @test_vsoxseg4_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13243,12 +13243,12 @@
define void @test_vsoxseg4_mask_nxv2f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13261,13 +13261,13 @@
define void @test_vsoxseg5_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13277,13 +13277,13 @@
define void @test_vsoxseg5_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13296,13 +13296,13 @@
define void @test_vsoxseg5_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13312,13 +13312,13 @@
define void @test_vsoxseg5_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13331,13 +13331,13 @@
define void @test_vsoxseg5_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13347,13 +13347,13 @@
define void @test_vsoxseg5_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13366,13 +13366,13 @@
define void @test_vsoxseg5_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13382,13 +13382,13 @@
define void @test_vsoxseg5_mask_nxv2f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13401,14 +13401,14 @@
define void @test_vsoxseg6_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13418,14 +13418,14 @@
define void @test_vsoxseg6_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13438,14 +13438,14 @@
define void @test_vsoxseg6_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13455,14 +13455,14 @@
define void @test_vsoxseg6_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13475,14 +13475,14 @@
define void @test_vsoxseg6_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13492,14 +13492,14 @@
define void @test_vsoxseg6_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13512,14 +13512,14 @@
define void @test_vsoxseg6_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13529,14 +13529,14 @@
define void @test_vsoxseg6_mask_nxv2f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13549,15 +13549,15 @@
define void @test_vsoxseg7_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13567,15 +13567,15 @@
define void @test_vsoxseg7_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13588,15 +13588,15 @@
define void @test_vsoxseg7_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13606,15 +13606,15 @@
define void @test_vsoxseg7_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13627,15 +13627,15 @@
define void @test_vsoxseg7_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13645,15 +13645,15 @@
define void @test_vsoxseg7_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13666,15 +13666,15 @@
define void @test_vsoxseg7_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13684,15 +13684,15 @@
define void @test_vsoxseg7_mask_nxv2f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13705,16 +13705,16 @@
define void @test_vsoxseg8_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13724,16 +13724,16 @@
define void @test_vsoxseg8_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13746,16 +13746,16 @@
define void @test_vsoxseg8_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13765,16 +13765,16 @@
define void @test_vsoxseg8_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13787,16 +13787,16 @@
define void @test_vsoxseg8_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13806,16 +13806,16 @@
define void @test_vsoxseg8_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13828,16 +13828,16 @@
define void @test_vsoxseg8_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13847,16 +13847,16 @@
define void @test_vsoxseg8_mask_nxv2f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13993,11 +13993,11 @@
define void @test_vsoxseg3_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i64( %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14007,11 +14007,11 @@
define void @test_vsoxseg3_mask_nxv1f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14024,11 +14024,11 @@
define void @test_vsoxseg3_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14038,11 +14038,11 @@
define void @test_vsoxseg3_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14055,11 +14055,11 @@
define void @test_vsoxseg3_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14069,11 +14069,11 @@
define void @test_vsoxseg3_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14086,11 +14086,11 @@
define void @test_vsoxseg3_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14100,11 +14100,11 @@
define void @test_vsoxseg3_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14117,12 +14117,12 @@
define void @test_vsoxseg4_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14132,12 +14132,12 @@
define void @test_vsoxseg4_mask_nxv1f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14150,12 +14150,12 @@
define void @test_vsoxseg4_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14165,12 +14165,12 @@
define void @test_vsoxseg4_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14183,12 +14183,12 @@
define void @test_vsoxseg4_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14198,12 +14198,12 @@
define void @test_vsoxseg4_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14216,12 +14216,12 @@
define void @test_vsoxseg4_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14231,12 +14231,12 @@
define void @test_vsoxseg4_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14249,13 +14249,13 @@
define void @test_vsoxseg5_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14265,13 +14265,13 @@
define void @test_vsoxseg5_mask_nxv1f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14284,13 +14284,13 @@
define void @test_vsoxseg5_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14300,13 +14300,13 @@
define void @test_vsoxseg5_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14319,13 +14319,13 @@
define void @test_vsoxseg5_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14335,13 +14335,13 @@
define void @test_vsoxseg5_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14354,13 +14354,13 @@
define void @test_vsoxseg5_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14370,13 +14370,13 @@
define void @test_vsoxseg5_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14389,14 +14389,14 @@
define void @test_vsoxseg6_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14406,14 +14406,14 @@
define void @test_vsoxseg6_mask_nxv1f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14426,14 +14426,14 @@
define void @test_vsoxseg6_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14443,14 +14443,14 @@
define void @test_vsoxseg6_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14463,14 +14463,14 @@
define void @test_vsoxseg6_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14480,14 +14480,14 @@
define void @test_vsoxseg6_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14500,14 +14500,14 @@
define void @test_vsoxseg6_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14517,14 +14517,14 @@
define void @test_vsoxseg6_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14537,15 +14537,15 @@
define void @test_vsoxseg7_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
vsoxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14555,15 +14555,15 @@ define void @test_vsoxseg7_mask_nxv1f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14576,15 +14576,15 @@ define void @test_vsoxseg7_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14594,15 +14594,15 @@ define void @test_vsoxseg7_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14615,15 +14615,15 @@ define void @test_vsoxseg7_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14633,15 +14633,15 @@
define void @test_vsoxseg7_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14654,15 +14654,15 @@
define void @test_vsoxseg7_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14672,15 +14672,15 @@
define void @test_vsoxseg7_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14693,16 +14693,16 @@
define void @test_vsoxseg8_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14712,16 +14712,16 @@
define void @test_vsoxseg8_mask_nxv1f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14734,16 +14734,16 @@
define void @test_vsoxseg8_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14753,16 +14753,16 @@
define void @test_vsoxseg8_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14775,16 +14775,16 @@
define void @test_vsoxseg8_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14794,16 +14794,16 @@
define void @test_vsoxseg8_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14816,16 +14816,16 @@
define void @test_vsoxseg8_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14835,16 +14835,16 @@
define void @test_vsoxseg8_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14981,11 +14981,11 @@
define void @test_vsoxseg3_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i64( %val, %val, %val, float* %base, %index, i64 %vl)
@@ -14995,11 +14995,11 @@
define void @test_vsoxseg3_mask_nxv1f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15012,11 +15012,11 @@
define void @test_vsoxseg3_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15026,11 +15026,11 @@
define void @test_vsoxseg3_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15043,11 +15043,11 @@
define void @test_vsoxseg3_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15057,11 +15057,11 @@
define void @test_vsoxseg3_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15074,11 +15074,11 @@
define void @test_vsoxseg3_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15088,11 +15088,11 @@
define void @test_vsoxseg3_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15105,12 +15105,12 @@
define void @test_vsoxseg4_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15120,12 +15120,12 @@
define void @test_vsoxseg4_mask_nxv1f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15138,12 +15138,12 @@
define void @test_vsoxseg4_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15153,12 +15153,12 @@
define void @test_vsoxseg4_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15171,12 +15171,12 @@
define void @test_vsoxseg4_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15186,12 +15186,12 @@
define void @test_vsoxseg4_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15204,12 +15204,12 @@
define void @test_vsoxseg4_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15219,12 +15219,12 @@
define void @test_vsoxseg4_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15237,13 +15237,13 @@
define void @test_vsoxseg5_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15253,13 +15253,13 @@
define void @test_vsoxseg5_mask_nxv1f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15272,13 +15272,13 @@
define void @test_vsoxseg5_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15288,13 +15288,13 @@
define void @test_vsoxseg5_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15307,13 +15307,13 @@
define void @test_vsoxseg5_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15323,13 +15323,13 @@
define void @test_vsoxseg5_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15342,13 +15342,13 @@
define void @test_vsoxseg5_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15358,13 +15358,13 @@
define void @test_vsoxseg5_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15377,14 +15377,14 @@
define void @test_vsoxseg6_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15394,14 +15394,14 @@
define void @test_vsoxseg6_mask_nxv1f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15414,14 +15414,14 @@
define void @test_vsoxseg6_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15431,14 +15431,14 @@
define void @test_vsoxseg6_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15451,14 +15451,14 @@
define void @test_vsoxseg6_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15468,14 +15468,14 @@
define void @test_vsoxseg6_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15488,14 +15488,14 @@
define void @test_vsoxseg6_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15505,14 +15505,14 @@
define void @test_vsoxseg6_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15525,15 +15525,15 @@
define void @test_vsoxseg7_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15543,15 +15543,15 @@
define void @test_vsoxseg7_mask_nxv1f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15564,15 +15564,15 @@
define void @test_vsoxseg7_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15582,15 +15582,15 @@
define void @test_vsoxseg7_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15603,15 +15603,15 @@
define void @test_vsoxseg7_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15621,15 +15621,15 @@
define void @test_vsoxseg7_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15642,15 +15642,15 @@
define void @test_vsoxseg7_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15660,15 +15660,15 @@
define void @test_vsoxseg7_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15681,16 +15681,16 @@
define void @test_vsoxseg8_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15700,16 +15700,16 @@
define void @test_vsoxseg8_mask_nxv1f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15722,16 +15722,16 @@
define void @test_vsoxseg8_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15741,16 +15741,16 @@
define void @test_vsoxseg8_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15763,16 +15763,16 @@
define void @test_vsoxseg8_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15782,16 +15782,16 @@
define void @test_vsoxseg8_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15804,16 +15804,16 @@
define void @test_vsoxseg8_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15823,16 +15823,16 @@
define void @test_vsoxseg8_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15965,11 +15965,11 @@
define void @test_vsoxseg3_nxv8f16_nxv8i16( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, i64 %vl)
@@ -15979,11 +15979,11 @@
define void @test_vsoxseg3_mask_nxv8f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -15996,11 +15996,11 @@
define void @test_vsoxseg3_nxv8f16_nxv8i8( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, i64 %vl)
@@ -16010,11 +16010,11 @@
define void @test_vsoxseg3_mask_nxv8f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -16058,11 +16058,11 @@
define void @test_vsoxseg3_nxv8f16_nxv8i32( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v12
+; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, i64 %vl)
@@ -16072,11 +16072,11 @@
define void @test_vsoxseg3_mask_nxv8f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -16089,12 +16089,12 @@
define void @test_vsoxseg4_nxv8f16_nxv8i16( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -16104,12 +16104,12 @@
define void @test_vsoxseg4_mask_nxv8f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -16122,12 +16122,12 @@
define void @test_vsoxseg4_nxv8f16_nxv8i8( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -16137,12 +16137,12 @@
define void @test_vsoxseg4_mask_nxv8f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -16188,12 +16188,12 @@
define void @test_vsoxseg4_nxv8f16_nxv8i32( %val, half* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v12
+; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -16203,12 +16203,12 @@
define void @test_vsoxseg4_mask_nxv8f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -16467,11 +16467,11 @@
define void @test_vsoxseg3_nxv2f64_nxv2i32( %val, double* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, i64 %vl)
@@ -16481,11 +16481,11 @@
define void @test_vsoxseg3_mask_nxv2f64_nxv2i32( %val, double* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl)
@@ -16498,11 +16498,11 @@
define void @test_vsoxseg3_nxv2f64_nxv2i8( %val, double* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, i64 %vl)
@@ -16512,11 +16512,11 @@
define void @test_vsoxseg3_mask_nxv2f64_nxv2i8( %val, double* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl)
@@ -16529,11 +16529,11 @@
define void @test_vsoxseg3_nxv2f64_nxv2i16( %val, double* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, i64 %vl)
@@ -16543,11 +16543,11 @@
define void @test_vsoxseg3_mask_nxv2f64_nxv2i16( %val, double* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl)
@@ -16560,11 +16560,11 @@
define void @test_vsoxseg3_nxv2f64_nxv2i64( %val, double* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v10
+; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i64( %val, %val, %val, double* %base, %index, i64 %vl)
@@ -16574,11 +16574,11 @@
define void @test_vsoxseg3_mask_nxv2f64_nxv2i64( %val, double* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl)
@@ -16591,12 +16591,12 @@
define void
CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -16606,12 +16606,12 @@ define void @test_vsoxseg4_mask_nxv2f64_nxv2i32( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16624,12 +16624,12 @@ define void @test_vsoxseg4_nxv2f64_nxv2i8( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -16639,12 +16639,12 @@ define void @test_vsoxseg4_mask_nxv2f64_nxv2i8( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16657,12 +16657,12 @@ define void @test_vsoxseg4_nxv2f64_nxv2i16( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -16672,12 +16672,12 @@ define void @test_vsoxseg4_mask_nxv2f64_nxv2i16( %val, double* 
%base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16690,12 +16690,12 @@ define void @test_vsoxseg4_nxv2f64_nxv2i64( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i64( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -16705,12 +16705,12 @@ define void @test_vsoxseg4_mask_nxv2f64_nxv2i64( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei64.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16843,11 +16843,11 @@ define void @test_vsoxseg3_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, i64 %vl) @@ -16857,11 +16857,11 @@ define void @test_vsoxseg3_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16874,11 +16874,11 @@ define void @test_vsoxseg3_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i8: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, i64 %vl) @@ -16888,11 +16888,11 @@ define void @test_vsoxseg3_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16936,11 +16936,11 @@ define void @test_vsoxseg3_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, i64 %vl) @@ -16950,11 +16950,11 @@ define void @test_vsoxseg3_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16967,12 +16967,12 @@ define void @test_vsoxseg4_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -16982,12 +16982,12 @@ define void @test_vsoxseg4_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: 
vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17000,12 +17000,12 @@ define void @test_vsoxseg4_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17015,12 +17015,12 @@ define void @test_vsoxseg4_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17066,12 +17066,12 @@ define void @test_vsoxseg4_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17081,12 +17081,12 @@ define void @test_vsoxseg4_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17099,13 +17099,13 @@ define void @test_vsoxseg5_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, 
v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17115,13 +17115,13 @@ define void @test_vsoxseg5_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17134,13 +17134,13 @@ define void @test_vsoxseg5_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17150,13 +17150,13 @@ define void @test_vsoxseg5_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17169,13 +17169,13 @@ define void @test_vsoxseg5_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v12 +; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17185,13 +17185,13 @@ define void 
@test_vsoxseg5_mask_nxv4f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17204,13 +17204,13 @@ define void @test_vsoxseg5_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17220,13 +17220,13 @@ define void @test_vsoxseg5_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17239,14 +17239,14 @@ define void @test_vsoxseg6_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17256,14 +17256,14 @@ define void @test_vsoxseg6_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; 
CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17276,14 +17276,14 @@ define void @test_vsoxseg6_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17293,14 +17293,14 @@ define void @test_vsoxseg6_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17313,14 +17313,14 @@ define void @test_vsoxseg6_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v12 +; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17330,14 +17330,14 @@ define void @test_vsoxseg6_mask_nxv4f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, 
v16 +; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17350,14 +17350,14 @@ define void @test_vsoxseg6_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17367,14 +17367,14 @@ define void @test_vsoxseg6_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17387,15 +17387,15 @@ define void @test_vsoxseg7_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17405,15 +17405,15 @@ define void @test_vsoxseg7_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; 
CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17426,15 +17426,15 @@ define void @test_vsoxseg7_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17444,15 +17444,15 @@ define void @test_vsoxseg7_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17465,15 +17465,15 @@ define void @test_vsoxseg7_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v12 +; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17483,15 +17483,15 @@ define void @test_vsoxseg7_mask_nxv4f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; 
CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17504,15 +17504,15 @@ define void @test_vsoxseg7_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17522,15 +17522,15 @@ define void @test_vsoxseg7_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17543,16 +17543,16 @@ define void @test_vsoxseg8_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17562,16 +17562,16 @@ define void @test_vsoxseg8_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: 
vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17584,16 +17584,16 @@ define void @test_vsoxseg8_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17603,16 +17603,16 @@ define void @test_vsoxseg8_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17625,16 +17625,16 @@ define void @test_vsoxseg8_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v12 +; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg8.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17644,16 +17644,16 @@ define void @test_vsoxseg8_mask_nxv4f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17666,16 +17666,16 @@ define void @test_vsoxseg8_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17685,16 +17685,16 @@ define void @test_vsoxseg8_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17829,11 +17829,11 @@ define void @test_vsoxseg3_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v0, 
(a0), v9 +; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, i64 %vl) @@ -17843,11 +17843,11 @@ define void @test_vsoxseg3_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17860,11 +17860,11 @@ define void @test_vsoxseg3_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, i64 %vl) @@ -17874,11 +17874,11 @@ define void @test_vsoxseg3_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17891,11 +17891,11 @@ define void @test_vsoxseg3_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, i64 %vl) @@ -17905,11 +17905,11 @@ define void @test_vsoxseg3_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17922,11 +17922,11 @@ define void @test_vsoxseg3_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg3_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i64( %val, %val, %val, half* %base, %index, i64 %vl) @@ -17936,11 +17936,11 @@ define void @test_vsoxseg3_mask_nxv2f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17953,12 +17953,12 @@ define void @test_vsoxseg4_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17968,12 +17968,12 @@ define void @test_vsoxseg4_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17986,12 +17986,12 @@ define void @test_vsoxseg4_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18001,12 +18001,12 @@ define void @test_vsoxseg4_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, 
v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18019,12 +18019,12 @@ define void @test_vsoxseg4_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18034,12 +18034,12 @@ define void @test_vsoxseg4_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18052,12 +18052,12 @@ define void @test_vsoxseg4_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18067,12 +18067,12 @@ define void @test_vsoxseg4_mask_nxv2f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18085,13 +18085,13 @@ define void @test_vsoxseg5_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v 
v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18101,13 +18101,13 @@ define void @test_vsoxseg5_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18120,13 +18120,13 @@ define void @test_vsoxseg5_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18136,13 +18136,13 @@ define void @test_vsoxseg5_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18155,13 +18155,13 @@ define void @test_vsoxseg5_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; 
CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18171,13 +18171,13 @@ define void @test_vsoxseg5_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18190,13 +18190,13 @@ define void @test_vsoxseg5_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18206,13 +18206,13 @@ define void @test_vsoxseg5_mask_nxv2f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18225,14 +18225,14 @@ define void @test_vsoxseg6_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18242,14 +18242,14 @@ define void @test_vsoxseg6_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18262,14 +18262,14 @@ define void @test_vsoxseg6_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18279,14 +18279,14 @@ define void @test_vsoxseg6_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18299,14 +18299,14 @@ define void @test_vsoxseg6_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18316,14 +18316,14 @@ define void @test_vsoxseg6_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; 
CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18336,14 +18336,14 @@ define void @test_vsoxseg6_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18353,14 +18353,14 @@ define void @test_vsoxseg6_mask_nxv2f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18373,15 +18373,15 @@ define void @test_vsoxseg7_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18391,15 +18391,15 @@ define void @test_vsoxseg7_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: 
vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18412,15 +18412,15 @@ define void @test_vsoxseg7_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18430,15 +18430,15 @@ define void @test_vsoxseg7_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18451,15 +18451,15 @@ define void @test_vsoxseg7_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18469,15 +18469,15 @@ define void @test_vsoxseg7_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: 
vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18490,15 +18490,15 @@ define void @test_vsoxseg7_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18508,15 +18508,15 @@ define void @test_vsoxseg7_mask_nxv2f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18529,16 +18529,16 @@ define void @test_vsoxseg8_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18548,16 +18548,16 @@ define void @test_vsoxseg8_mask_nxv2f16_nxv2i32( %val, half* %base, %index, 
%mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18570,16 +18570,16 @@ define void @test_vsoxseg8_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18589,16 +18589,16 @@ define void @test_vsoxseg8_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18611,16 +18611,16 @@ define void @test_vsoxseg8_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18630,16 +18630,16 @@ define void @test_vsoxseg8_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18652,16 +18652,16 @@ define void @test_vsoxseg8_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18671,16 +18671,16 @@ define void @test_vsoxseg8_mask_nxv2f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18815,11 +18815,11 @@ define void @test_vsoxseg3_nxv4f32_nxv4i32( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; 
CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, i64 %vl) @@ -18829,11 +18829,11 @@ define void @test_vsoxseg3_mask_nxv4f32_nxv4i32( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18846,11 +18846,11 @@ define void @test_vsoxseg3_nxv4f32_nxv4i8( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, i64 %vl) @@ -18860,11 +18860,11 @@ define void @test_vsoxseg3_mask_nxv4f32_nxv4i8( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18877,11 +18877,11 @@ define void @test_vsoxseg3_nxv4f32_nxv4i64( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v12 +; CHECK-NEXT: vsoxseg3ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i64( %val, %val, %val, float* %base, %index, i64 %vl) @@ -18891,11 +18891,11 @@ define void @test_vsoxseg3_mask_nxv4f32_nxv4i64( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei64.v v2, (a0), v12, v0.t +; CHECK-NEXT: vsoxseg3ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18908,11 +18908,11 @@ define void @test_vsoxseg3_nxv4f32_nxv4i16( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, i64 %vl) @@ -18922,11 +18922,11 @@ define void @test_vsoxseg3_mask_nxv4f32_nxv4i16( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18939,12 +18939,12 @@ define void @test_vsoxseg4_nxv4f32_nxv4i32( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -18954,12 +18954,12 @@ define void @test_vsoxseg4_mask_nxv4f32_nxv4i32( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18972,12 +18972,12 @@ define void @test_vsoxseg4_nxv4f32_nxv4i8( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -18987,12 
+18987,12 @@ define void @test_vsoxseg4_mask_nxv4f32_nxv4i8( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -19005,12 +19005,12 @@ define void @test_vsoxseg4_nxv4f32_nxv4i64( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v12 +; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i64( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -19020,12 +19020,12 @@ define void @test_vsoxseg4_mask_nxv4f32_nxv4i64( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei64.v v2, (a0), v12, v0.t +; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -19038,12 +19038,12 @@ define void @test_vsoxseg4_nxv4f32_nxv4i16( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -19053,12 +19053,12 @@ define void @test_vsoxseg4_mask_nxv4f32_nxv4i16( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i16( %val, %val, 
%val, %val, float* %base, %index, %mask, i64 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll @@ -192,11 +192,11 @@ define void @test_vsuxseg3_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -206,11 +206,11 @@ define void @test_vsuxseg3_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -223,11 +223,11 @@ define void @test_vsuxseg3_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -237,11 +237,11 @@ define void @test_vsuxseg3_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -254,11 +254,11 @@ define void @test_vsuxseg3_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -268,11 +268,11 @@ define void @test_vsuxseg3_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: 
test_vsuxseg3_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -285,12 +285,12 @@ define void @test_vsuxseg4_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -300,12 +300,12 @@ define void @test_vsuxseg4_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -318,12 +318,12 @@ define void @test_vsuxseg4_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -333,12 +333,12 @@ define void @test_vsuxseg4_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -351,12 +351,12 @@ define void @test_vsuxseg4_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: 
vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -366,12 +366,12 @@ define void @test_vsuxseg4_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -384,13 +384,13 @@ define void @test_vsuxseg5_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -400,13 +400,13 @@ define void @test_vsuxseg5_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -419,13 +419,13 @@ define void @test_vsuxseg5_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -435,13 +435,13 @@ define void @test_vsuxseg5_mask_nxv1i8_nxv1i32( %val, 
i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -454,13 +454,13 @@ define void @test_vsuxseg5_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -470,13 +470,13 @@ define void @test_vsuxseg5_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -489,14 +489,14 @@ define void @test_vsuxseg6_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -506,14 +506,14 @@ define void @test_vsuxseg6_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: 
vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -526,14 +526,14 @@ define void @test_vsuxseg6_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -543,14 +543,14 @@ define void @test_vsuxseg6_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -563,14 +563,14 @@ define void @test_vsuxseg6_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -580,14 +580,14 @@ define void @test_vsuxseg6_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: 
vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -600,15 +600,15 @@ define void @test_vsuxseg7_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -618,15 +618,15 @@ define void @test_vsuxseg7_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -639,15 +639,15 @@ define void @test_vsuxseg7_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -657,15 +657,15 @@ define void @test_vsuxseg7_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu 
-; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -678,15 +678,15 @@
 define void @test_vsuxseg7_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -696,15 +696,15 @@
 define void @test_vsuxseg7_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -717,16 +717,16 @@
 define void @test_vsuxseg8_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -736,16 +736,16 @@
 define void @test_vsuxseg8_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -758,16 +758,16 @@
 define void @test_vsuxseg8_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -777,16 +777,16 @@
 define void @test_vsuxseg8_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -799,16 +799,16 @@
 define void @test_vsuxseg8_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -818,16 +818,16 @@
 define void @test_vsuxseg8_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -929,11 +929,11 @@
 define void @test_vsuxseg3_nxv16i8_nxv16i16( %val, i8* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg3ei16.v v16, (a0), v12
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -943,11 +943,11 @@
 define void @test_vsuxseg3_mask_nxv16i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -960,11 +960,11 @@
 define void @test_vsuxseg3_nxv16i8_nxv16i8( %val, i8* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -974,11 +974,11 @@
 define void @test_vsuxseg3_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -1022,12 +1022,12 @@
 define void @test_vsuxseg4_nxv16i8_nxv16i16( %val, i8* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg4ei16.v v16, (a0), v12
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -1037,12 +1037,12 @@
 define void @test_vsuxseg4_mask_nxv16i8_nxv16i16( %val, i8* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -1055,12 +1055,12 @@
 define void @test_vsuxseg4_nxv16i8_nxv16i8( %val, i8* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -1070,12 +1070,12 @@
 define void @test_vsuxseg4_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -1214,11 +1214,11 @@
 define void @test_vsuxseg3_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -1228,11 +1228,11 @@
 define void @test_vsuxseg3_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -1245,11 +1245,11 @@
 define void @test_vsuxseg3_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -1259,11 +1259,11 @@
 define void @test_vsuxseg3_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -1276,11 +1276,11 @@
 define void @test_vsuxseg3_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -1290,11 +1290,11 @@
 define void @test_vsuxseg3_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -1307,12 +1307,12 @@
 define void @test_vsuxseg4_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -1322,12 +1322,12 @@
 define void @test_vsuxseg4_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -1340,12 +1340,12 @@
 define void @test_vsuxseg4_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -1355,12 +1355,12 @@
 define void @test_vsuxseg4_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -1373,12 +1373,12 @@
 define void @test_vsuxseg4_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -1388,12 +1388,12 @@
 define void @test_vsuxseg4_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -1406,13 +1406,13 @@
 define void @test_vsuxseg5_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -1422,13 +1422,13 @@
 define void @test_vsuxseg5_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -1441,13 +1441,13 @@
 define void @test_vsuxseg5_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -1457,13 +1457,13 @@
 define void @test_vsuxseg5_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -1476,13 +1476,13 @@
 define void @test_vsuxseg5_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -1492,13 +1492,13 @@
 define void @test_vsuxseg5_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -1511,14 +1511,14 @@
 define void @test_vsuxseg6_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -1528,14 +1528,14 @@
 define void @test_vsuxseg6_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -1548,14 +1548,14 @@
 define void @test_vsuxseg6_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -1565,14 +1565,14 @@
 define void @test_vsuxseg6_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -1585,14 +1585,14 @@
 define void @test_vsuxseg6_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -1602,14 +1602,14 @@
 define void @test_vsuxseg6_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -1622,15 +1622,15 @@
 define void @test_vsuxseg7_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -1640,15 +1640,15 @@
 define void @test_vsuxseg7_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -1661,15 +1661,15 @@
 define void @test_vsuxseg7_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -1679,15 +1679,15 @@
 define void @test_vsuxseg7_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -1700,15 +1700,15 @@
 define void @test_vsuxseg7_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -1718,15 +1718,15 @@
 define void @test_vsuxseg7_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -1739,16 +1739,16 @@
 define void @test_vsuxseg8_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -1758,16 +1758,16 @@
 define void @test_vsuxseg8_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -1780,16 +1780,16 @@
 define void @test_vsuxseg8_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -1799,16 +1799,16 @@
 define void @test_vsuxseg8_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -1821,16 +1821,16 @@
 define void @test_vsuxseg8_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -1840,16 +1840,16 @@
 define void @test_vsuxseg8_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -1953,11 +1953,11 @@
 define void @test_vsuxseg3_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -1967,11 +1967,11 @@
 define void @test_vsuxseg3_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -1984,11 +1984,11 @@
 define void @test_vsuxseg3_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -1998,11 +1998,11 @@
 define void @test_vsuxseg3_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2015,11 +2015,11 @@
 define void @test_vsuxseg3_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2029,11 +2029,11 @@
 define void @test_vsuxseg3_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2046,12 +2046,12 @@
 define void @test_vsuxseg4_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2061,12 +2061,12 @@
 define void @test_vsuxseg4_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2079,12 +2079,12 @@
 define void @test_vsuxseg4_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2094,12 +2094,12 @@
 define void @test_vsuxseg4_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2112,12 +2112,12 @@
 define void @test_vsuxseg4_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2127,12 +2127,12 @@
 define void @test_vsuxseg4_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2145,13 +2145,13 @@
 define void @test_vsuxseg5_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2161,13 +2161,13 @@
 define void @test_vsuxseg5_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2180,13 +2180,13 @@
 define void @test_vsuxseg5_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2196,13 +2196,13 @@
 define void @test_vsuxseg5_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2215,13 +2215,13 @@
 define void @test_vsuxseg5_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2231,13 +2231,13 @@
 define void @test_vsuxseg5_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2250,14 +2250,14 @@
 define void @test_vsuxseg6_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2267,14 +2267,14 @@
 define void @test_vsuxseg6_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2287,14 +2287,14 @@
 define void @test_vsuxseg6_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2304,14 +2304,14 @@
 define void @test_vsuxseg6_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2324,14 +2324,14 @@
 define void @test_vsuxseg6_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2341,14 +2341,14 @@
 define void @test_vsuxseg6_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32(
%val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2361,15 +2361,15 @@
define void @test_vsuxseg7_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2379,15 +2379,15 @@
define void @test_vsuxseg7_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2400,15 +2400,15 @@
define void @test_vsuxseg7_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2418,15 +2418,15 @@
define void @test_vsuxseg7_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0),
v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2439,15 +2439,15 @@
define void @test_vsuxseg7_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2457,15 +2457,15 @@
define void @test_vsuxseg7_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2478,16 +2478,16 @@
define void @test_vsuxseg8_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2497,16 +2497,16 @@
define void @test_vsuxseg8_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v
v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2519,16 +2519,16 @@
define void @test_vsuxseg8_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2538,16 +2538,16 @@
define void @test_vsuxseg8_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2560,16 +2560,16 @@
define void @test_vsuxseg8_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -2579,16 +2579,16 @@
define void @test_vsuxseg8_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT:
vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -2694,11 +2694,11 @@
define void @test_vsuxseg3_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -2708,11 +2708,11 @@
define void @test_vsuxseg3_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -2725,11 +2725,11 @@
define void @test_vsuxseg3_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -2739,11 +2739,11 @@
define void @test_vsuxseg3_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -2756,11 +2756,11 @@
define void @test_vsuxseg3_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-;
CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -2770,11 +2770,11 @@
define void @test_vsuxseg3_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -2787,12 +2787,12 @@
define void @test_vsuxseg4_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -2802,12 +2802,12 @@
define void @test_vsuxseg4_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -2820,12 +2820,12 @@
define void @test_vsuxseg4_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -2835,12 +2835,12 @@
define void @test_vsuxseg4_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v
v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -2853,12 +2853,12 @@
define void @test_vsuxseg4_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -2868,12 +2868,12 @@
define void @test_vsuxseg4_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -2886,13 +2886,13 @@
define void @test_vsuxseg5_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -2902,13 +2902,13 @@
define void @test_vsuxseg5_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -2921,13 +2921,13 @@
define void @test_vsuxseg5_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-;
CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -2937,13 +2937,13 @@
define void @test_vsuxseg5_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -2956,13 +2956,13 @@
define void @test_vsuxseg5_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -2972,13 +2972,13 @@
define void @test_vsuxseg5_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -2991,14 +2991,14 @@
define void @test_vsuxseg6_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:
vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -3008,14 +3008,14 @@
define void @test_vsuxseg6_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -3028,14 +3028,14 @@
define void @test_vsuxseg6_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -3045,14 +3045,14 @@
define void @test_vsuxseg6_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -3065,14 +3065,14 @@
define void @test_vsuxseg6_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16( %val, %val, %val,
%val, %val, %val, i32* %base, %index, i32 %vl)
@@ -3082,14 +3082,14 @@
define void @test_vsuxseg6_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -3102,15 +3102,15 @@
define void @test_vsuxseg7_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -3120,15 +3120,15 @@
define void @test_vsuxseg7_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -3141,15 +3141,15 @@
define void @test_vsuxseg7_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void
@llvm.riscv.vsuxseg7.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -3159,15 +3159,15 @@
define void @test_vsuxseg7_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -3180,15 +3180,15 @@
define void @test_vsuxseg7_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -3198,15 +3198,15 @@
define void @test_vsuxseg7_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -3219,16 +3219,16 @@
define void @test_vsuxseg8_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
;
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -3238,16 +3238,16 @@
define void @test_vsuxseg8_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -3260,16 +3260,16 @@
define void @test_vsuxseg8_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -3279,16 +3279,16 @@
define void @test_vsuxseg8_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -3301,16 +3301,16 @@
define void @test_vsuxseg8_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -3320,16 +3320,16 @@
define void @test_vsuxseg8_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -3433,11 +3433,11 @@
define void @test_vsuxseg3_nxv8i16_nxv8i16( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -3447,11 +3447,11 @@
define void @test_vsuxseg3_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -3464,11 +3464,11 @@
define void @test_vsuxseg3_nxv8i16_nxv8i8( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8( %val, %val, %val,
i16* %base, %index, i32 %vl)
@@ -3478,11 +3478,11 @@
define void @test_vsuxseg3_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -3495,11 +3495,11 @@
define void @test_vsuxseg3_nxv8i16_nxv8i32( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -3509,11 +3509,11 @@
define void @test_vsuxseg3_mask_nxv8i16_nxv8i32( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -3526,12 +3526,12 @@
define void @test_vsuxseg4_nxv8i16_nxv8i16( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -3541,12 +3541,12 @@
define void @test_vsuxseg4_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -3559,12 +3559,12 @@
define void @test_vsuxseg4_nxv8i16_nxv8i8( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL:
test_vsuxseg4_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -3574,12 +3574,12 @@
define void @test_vsuxseg4_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -3592,12 +3592,12 @@
define void @test_vsuxseg4_nxv8i16_nxv8i32( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -3607,12 +3607,12 @@
define void @test_vsuxseg4_mask_nxv8i16_nxv8i32( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -3714,11 +3714,11 @@
define void @test_vsuxseg3_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -3728,11 +3728,11 @@
define void @test_vsuxseg3_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT:
vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -3745,11 +3745,11 @@
define void @test_vsuxseg3_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -3759,11 +3759,11 @@
define void @test_vsuxseg3_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -3807,12 +3807,12 @@
define void @test_vsuxseg4_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -3822,12 +3822,12 @@
define void @test_vsuxseg4_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -3840,12 +3840,12 @@
define void @test_vsuxseg4_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
;
CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -3855,12 +3855,12 @@
define void @test_vsuxseg4_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -3906,13 +3906,13 @@
define void @test_vsuxseg5_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg5ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -3922,13 +3922,13 @@
define void @test_vsuxseg5_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg5ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -3941,13 +3941,13 @@
define void @test_vsuxseg5_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -3957,13 +3957,13 @@
define void @test_vsuxseg5_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3,
v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -3976,13 +3976,13 @@
define void @test_vsuxseg5_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg5ei32.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -3992,13 +3992,13 @@
define void @test_vsuxseg5_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4011,14 +4011,14 @@
define void @test_vsuxseg6_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg6ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4028,14 +4028,14 @@
define void @test_vsuxseg6_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v10, v0.t
+;
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4048,14 +4048,14 @@
define void @test_vsuxseg6_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4065,14 +4065,14 @@
define void @test_vsuxseg6_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4085,14 +4085,14 @@
define void @test_vsuxseg6_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg6ei32.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4102,14 +4102,14 @@
define void @test_vsuxseg6_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4122,15 +4122,15 @@
define void @test_vsuxseg7_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg7ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4140,15 +4140,15 @@
define void @test_vsuxseg7_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg7ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4161,15 +4161,15 @@
define void @test_vsuxseg7_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4179,15 +4179,15 @@
define void @test_vsuxseg7_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4200,15 +4200,15 @@
define void @test_vsuxseg7_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg7ei32.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4218,15 +4218,15 @@
define void @test_vsuxseg7_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg7ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4239,16 +4239,16 @@
define void @test_vsuxseg8_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg8ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4258,16 +4258,16 @@
define void @test_vsuxseg8_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg8ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4280,16 +4280,16 @@
define void @test_vsuxseg8_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4299,16 +4299,16 @@
define void @test_vsuxseg8_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4321,16 +4321,16 @@
define void @test_vsuxseg8_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v23, v16
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg8ei32.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4340,16 +4340,16 @@
define void @test_vsuxseg8_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v23, v16
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg8ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4546,11 +4546,11 @@
define void @test_vsuxseg3_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4560,11 +4560,11 @@
define void @test_vsuxseg3_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4577,11 +4577,11 @@
define void @test_vsuxseg3_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4591,11 +4591,11 @@
define void @test_vsuxseg3_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4608,11 +4608,11 @@
define void @test_vsuxseg3_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4622,11 +4622,11 @@
define void @test_vsuxseg3_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4639,12 +4639,12 @@
define void @test_vsuxseg4_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4654,12 +4654,12 @@
define void @test_vsuxseg4_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4672,12 +4672,12 @@
define void @test_vsuxseg4_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4687,12 +4687,12 @@
define void @test_vsuxseg4_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4705,12 +4705,12 @@
define void @test_vsuxseg4_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4720,12 +4720,12 @@
define void @test_vsuxseg4_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4738,13 +4738,13 @@
define void @test_vsuxseg5_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4754,13 +4754,13 @@
define void @test_vsuxseg5_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4773,13 +4773,13 @@
define void @test_vsuxseg5_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4789,13 +4789,13 @@
define void @test_vsuxseg5_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4808,13 +4808,13 @@
define void @test_vsuxseg5_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4824,13 +4824,13 @@
define void @test_vsuxseg5_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4843,14 +4843,14 @@
define void @test_vsuxseg6_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4860,14 +4860,14 @@
define void @test_vsuxseg6_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4880,14 +4880,14 @@
define void @test_vsuxseg6_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4897,14 +4897,14 @@
define void @test_vsuxseg6_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4917,14 +4917,14 @@
define void @test_vsuxseg6_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4934,14 +4934,14 @@
define void @test_vsuxseg6_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4954,15 +4954,15 @@
define void @test_vsuxseg7_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -4972,15 +4972,15 @@
define void @test_vsuxseg7_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -4993,15 +4993,15 @@
define void @test_vsuxseg7_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -5011,15 +5011,15 @@
define void @test_vsuxseg7_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -5032,15 +5032,15 @@
define void @test_vsuxseg7_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -5050,15 +5050,15 @@
define void @test_vsuxseg7_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -5071,16 +5071,16 @@
define void @test_vsuxseg8_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -5090,16 +5090,16 @@
define void @test_vsuxseg8_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -5112,16 +5112,16 @@
define void @test_vsuxseg8_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -5131,16 +5131,16 @@
define void @test_vsuxseg8_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -5153,16 +5153,16 @@
define void @test_vsuxseg8_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl)
@@ -5172,16 +5172,16 @@
define void @test_vsuxseg8_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
@@ -5287,11 +5287,11 @@
define void @test_vsuxseg3_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -5301,11 +5301,11 @@
define void @test_vsuxseg3_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -5318,11 +5318,11 @@
define void @test_vsuxseg3_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -5332,11 +5332,11 @@
define void @test_vsuxseg3_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -5349,11 +5349,11 @@
define void @test_vsuxseg3_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -5363,11 +5363,11 @@
define void @test_vsuxseg3_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -5380,12 +5380,12 @@
define void @test_vsuxseg4_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -5395,12 +5395,12 @@
define void @test_vsuxseg4_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -5413,12 +5413,12 @@
define void @test_vsuxseg4_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -5428,12 +5428,12 @@
define void @test_vsuxseg4_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -5446,12 +5446,12 @@
define void @test_vsuxseg4_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -5461,12 +5461,12 @@
define void @test_vsuxseg4_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -5479,13 +5479,13 @@
define void @test_vsuxseg5_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -5495,13 +5495,13 @@
define void @test_vsuxseg5_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -5514,13 +5514,13 @@
define void @test_vsuxseg5_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -5530,13 +5530,13 @@
define void @test_vsuxseg5_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -5549,13 +5549,13 @@
define void @test_vsuxseg5_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -5565,13 +5565,13 @@
define void @test_vsuxseg5_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -5584,14 +5584,14 @@
define void @test_vsuxseg6_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -5601,14 +5601,14 @@
define void @test_vsuxseg6_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -5621,14 +5621,14 @@
define void @test_vsuxseg6_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -5638,14 +5638,14 @@
define void @test_vsuxseg6_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -5658,14 +5658,14 @@
define void @test_vsuxseg6_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5675,14 +5675,14 @@ define void @test_vsuxseg6_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5695,15 +5695,15 @@ define void @test_vsuxseg7_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5713,15 +5713,15 @@ define void @test_vsuxseg7_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5734,15 +5734,15 @@ define void @test_vsuxseg7_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 
-; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5752,15 +5752,15 @@ define void @test_vsuxseg7_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5773,15 +5773,15 @@ define void @test_vsuxseg7_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5791,15 +5791,15 @@ define void @test_vsuxseg7_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5812,16 +5812,16 @@ define void @test_vsuxseg8_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsuxseg8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5831,16 +5831,16 @@ define void @test_vsuxseg8_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5853,16 +5853,16 @@ define void @test_vsuxseg8_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5872,16 +5872,16 @@ define void @test_vsuxseg8_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; 
CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5894,16 +5894,16 @@ define void @test_vsuxseg8_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5913,16 +5913,16 @@ define void @test_vsuxseg8_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6088,11 +6088,11 @@ define void @test_vsuxseg3_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6102,11 +6102,11 @@ define void @test_vsuxseg3_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6119,11 +6119,11 @@ define void 
@test_vsuxseg3_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6133,11 +6133,11 @@ define void @test_vsuxseg3_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6150,11 +6150,11 @@ define void @test_vsuxseg3_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6164,11 +6164,11 @@ define void @test_vsuxseg3_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6181,12 +6181,12 @@ define void @test_vsuxseg4_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6196,12 +6196,12 @@ define void @test_vsuxseg4_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: 
vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6214,12 +6214,12 @@ define void @test_vsuxseg4_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6229,12 +6229,12 @@ define void @test_vsuxseg4_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6247,12 +6247,12 @@ define void @test_vsuxseg4_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6262,12 +6262,12 @@ define void @test_vsuxseg4_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6280,13 +6280,13 @@ define void @test_vsuxseg5_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; 
CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6296,13 +6296,13 @@ define void @test_vsuxseg5_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6315,13 +6315,13 @@ define void @test_vsuxseg5_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6331,13 +6331,13 @@ define void @test_vsuxseg5_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6350,13 +6350,13 @@ define void @test_vsuxseg5_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6366,13 +6366,13 @@ define void @test_vsuxseg5_mask_nxv2i8_nxv2i16( 
%val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6385,14 +6385,14 @@ define void @test_vsuxseg6_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6402,14 +6402,14 @@ define void @test_vsuxseg6_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6422,14 +6422,14 @@ define void @test_vsuxseg6_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6439,14 +6439,14 @@ define void @test_vsuxseg6_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: 
vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6459,14 +6459,14 @@ define void @test_vsuxseg6_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6476,14 +6476,14 @@ define void @test_vsuxseg6_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6496,15 +6496,15 @@ define void @test_vsuxseg7_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6514,15 +6514,15 @@ define void @test_vsuxseg7_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; 
CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6535,15 +6535,15 @@ define void @test_vsuxseg7_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6553,15 +6553,15 @@ define void @test_vsuxseg7_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6574,15 +6574,15 @@ define void @test_vsuxseg7_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6592,15 +6592,15 @@ define void @test_vsuxseg7_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: 
vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6613,16 +6613,16 @@ define void @test_vsuxseg8_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6632,16 +6632,16 @@ define void @test_vsuxseg8_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6654,16 +6654,16 @@ define void @test_vsuxseg8_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6673,16 +6673,16 @@ define void @test_vsuxseg8_mask_nxv2i8_nxv2i8( 
%val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6695,16 +6695,16 @@ define void @test_vsuxseg8_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6714,16 +6714,16 @@ define void @test_vsuxseg8_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6829,11 +6829,11 @@ define void @test_vsuxseg3_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -6843,11 +6843,11 @@ define void 
@test_vsuxseg3_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6860,11 +6860,11 @@ define void @test_vsuxseg3_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -6874,11 +6874,11 @@ define void @test_vsuxseg3_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6891,11 +6891,11 @@ define void @test_vsuxseg3_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -6905,11 +6905,11 @@ define void @test_vsuxseg3_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6922,12 +6922,12 @@ define void @test_vsuxseg4_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, 
v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -6937,12 +6937,12 @@ define void @test_vsuxseg4_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6955,12 +6955,12 @@ define void @test_vsuxseg4_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -6970,12 +6970,12 @@ define void @test_vsuxseg4_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6988,12 +6988,12 @@ define void @test_vsuxseg4_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7003,12 +7003,12 @@ define void @test_vsuxseg4_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 
+; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7021,13 +7021,13 @@ define void @test_vsuxseg5_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7037,13 +7037,13 @@ define void @test_vsuxseg5_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7056,13 +7056,13 @@ define void @test_vsuxseg5_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7072,13 +7072,13 @@ define void @test_vsuxseg5_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7091,13 +7091,13 @@ define void @test_vsuxseg5_nxv2i16_nxv2i16( %val, i16* %base, 
%index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7107,13 +7107,13 @@ define void @test_vsuxseg5_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7126,14 +7126,14 @@ define void @test_vsuxseg6_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7143,14 +7143,14 @@ define void @test_vsuxseg6_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7163,14 +7163,14 @@ define void @test_vsuxseg6_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v 
v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -7180,14 +7180,14 @@
define void @test_vsuxseg6_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -7200,14 +7200,14 @@
define void @test_vsuxseg6_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -7217,14 +7217,14 @@
define void @test_vsuxseg6_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -7237,15 +7237,15 @@
define void @test_vsuxseg7_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -7255,15 +7255,15 @@
define void @test_vsuxseg7_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -7276,15 +7276,15 @@
define void @test_vsuxseg7_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -7294,15 +7294,15 @@
define void @test_vsuxseg7_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -7315,15 +7315,15 @@
define void @test_vsuxseg7_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -7333,15 +7333,15 @@
define void @test_vsuxseg7_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -7354,16 +7354,16 @@
define void @test_vsuxseg8_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -7373,16 +7373,16 @@
define void @test_vsuxseg8_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -7395,16 +7395,16 @@
define void @test_vsuxseg8_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -7414,16 +7414,16 @@
define void @test_vsuxseg8_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -7436,16 +7436,16 @@
define void @test_vsuxseg8_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl)
@@ -7455,16 +7455,16 @@
define void @test_vsuxseg8_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
@@ -7570,11 +7570,11 @@
define void @test_vsuxseg3_nxv4i32_nxv4i16( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -7584,11 +7584,11 @@
define void @test_vsuxseg3_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -7601,11 +7601,11 @@
define void @test_vsuxseg3_nxv4i32_nxv4i8( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -7615,11 +7615,11 @@
define void @test_vsuxseg3_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -7632,11 +7632,11 @@
define void @test_vsuxseg3_nxv4i32_nxv4i32( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -7646,11 +7646,11 @@
define void @test_vsuxseg3_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -7663,12 +7663,12 @@
define void @test_vsuxseg4_nxv4i32_nxv4i16( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -7678,12 +7678,12 @@
define void @test_vsuxseg4_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -7696,12 +7696,12 @@
define void @test_vsuxseg4_nxv4i32_nxv4i8( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -7711,12 +7711,12 @@
define void @test_vsuxseg4_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -7729,12 +7729,12 @@
define void @test_vsuxseg4_nxv4i32_nxv4i32( %val, i32* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl)
@@ -7744,12 +7744,12 @@
define void @test_vsuxseg4_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
@@ -8039,11 +8039,11 @@
define void @test_vsuxseg3_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8053,11 +8053,11 @@
define void @test_vsuxseg3_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8070,11 +8070,11 @@
define void @test_vsuxseg3_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8084,11 +8084,11 @@
define void @test_vsuxseg3_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8101,11 +8101,11 @@
define void @test_vsuxseg3_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8115,11 +8115,11 @@
define void @test_vsuxseg3_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8132,12 +8132,12 @@
define void @test_vsuxseg4_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8147,12 +8147,12 @@
define void @test_vsuxseg4_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8165,12 +8165,12 @@
define void @test_vsuxseg4_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8180,12 +8180,12 @@
define void @test_vsuxseg4_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8198,12 +8198,12 @@
define void @test_vsuxseg4_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8213,12 +8213,12 @@
define void @test_vsuxseg4_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8231,13 +8231,13 @@
define void @test_vsuxseg5_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8247,13 +8247,13 @@
define void @test_vsuxseg5_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8266,13 +8266,13 @@
define void @test_vsuxseg5_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8282,13 +8282,13 @@
define void @test_vsuxseg5_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8301,13 +8301,13 @@
define void @test_vsuxseg5_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8317,13 +8317,13 @@
define void @test_vsuxseg5_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8336,14 +8336,14 @@
define void @test_vsuxseg6_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8353,14 +8353,14 @@
define void @test_vsuxseg6_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8373,14 +8373,14 @@
define void @test_vsuxseg6_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8390,14 +8390,14 @@
define void @test_vsuxseg6_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8410,14 +8410,14 @@
define void @test_vsuxseg6_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8427,14 +8427,14 @@
define void @test_vsuxseg6_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8447,15 +8447,15 @@
define void @test_vsuxseg7_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8465,15 +8465,15 @@
define void @test_vsuxseg7_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8486,15 +8486,15 @@
define void @test_vsuxseg7_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8504,15 +8504,15 @@
define void @test_vsuxseg7_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8525,15 +8525,15 @@
define void @test_vsuxseg7_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8543,15 +8543,15 @@
define void @test_vsuxseg7_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8564,16 +8564,16 @@
define void @test_vsuxseg8_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8583,16 +8583,16 @@
define void @test_vsuxseg8_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8605,16 +8605,16 @@
define void @test_vsuxseg8_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8624,16 +8624,16 @@
define void @test_vsuxseg8_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8646,16 +8646,16 @@
define void @test_vsuxseg8_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -8665,16 +8665,16 @@
define void @test_vsuxseg8_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -8780,11 +8780,11 @@
define void @test_vsuxseg3_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, i32 %vl)
@@ -8794,11 +8794,11 @@
define void @test_vsuxseg3_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -8811,11 +8811,11 @@
define void @test_vsuxseg3_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, i32 %vl)
@@ -8825,11 +8825,11 @@
define void @test_vsuxseg3_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -8842,11 +8842,11 @@
define void @test_vsuxseg3_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, i32 %vl)
@@ -8856,11 +8856,11 @@
define void @test_vsuxseg3_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -8873,12 +8873,12 @@
define void @test_vsuxseg4_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -8888,12 +8888,12 @@
define void @test_vsuxseg4_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -8906,12 +8906,12 @@
define void @test_vsuxseg4_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -8921,12 +8921,12 @@
define void @test_vsuxseg4_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -8939,12 +8939,12 @@
define void @test_vsuxseg4_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -8954,12 +8954,12 @@
define void @test_vsuxseg4_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -8972,13 +8972,13 @@
define void @test_vsuxseg5_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -8988,13 +8988,13 @@
define void @test_vsuxseg5_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -9007,13 +9007,13 @@
define void @test_vsuxseg5_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -9023,13 +9023,13 @@
define void @test_vsuxseg5_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -9042,13 +9042,13 @@
define void @test_vsuxseg5_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -9058,13 +9058,13 @@
define void @test_vsuxseg5_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -9077,14 +9077,14 @@
define void @test_vsuxseg6_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -9094,14 +9094,14 @@
define void @test_vsuxseg6_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -9114,14 +9114,14 @@
define void @test_vsuxseg6_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -9131,14 +9131,14 @@
define void @test_vsuxseg6_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -9151,14 +9151,14 @@
define void @test_vsuxseg6_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -9168,14 +9168,14 @@
define void @test_vsuxseg6_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -9188,15 +9188,15 @@
define void @test_vsuxseg7_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -9206,15 +9206,15 @@
define void @test_vsuxseg7_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-;
CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9227,15 +9227,15 @@ define void @test_vsuxseg7_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9245,15 +9245,15 @@ define void @test_vsuxseg7_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9266,15 +9266,15 @@ define void @test_vsuxseg7_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9284,15 +9284,15 @@ define void @test_vsuxseg7_mask_nxv2f32_nxv2i16( %val, float* 
%base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9305,16 +9305,16 @@ define void @test_vsuxseg8_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9324,16 +9324,16 @@ define void @test_vsuxseg8_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9346,16 +9346,16 @@ define void @test_vsuxseg8_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; 
CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9365,16 +9365,16 @@ define void @test_vsuxseg8_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9387,16 +9387,16 @@ define void @test_vsuxseg8_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9406,16 +9406,16 @@ define void @test_vsuxseg8_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9521,11 +9521,11 @@ define void @test_vsuxseg3_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; 
CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, i32 %vl) @@ -9535,11 +9535,11 @@ define void @test_vsuxseg3_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9552,11 +9552,11 @@ define void @test_vsuxseg3_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, i32 %vl) @@ -9566,11 +9566,11 @@ define void @test_vsuxseg3_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9583,11 +9583,11 @@ define void @test_vsuxseg3_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, i32 %vl) @@ -9597,11 +9597,11 @@ define void @test_vsuxseg3_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9614,12 
+9614,12 @@ define void @test_vsuxseg4_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9629,12 +9629,12 @@ define void @test_vsuxseg4_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9647,12 +9647,12 @@ define void @test_vsuxseg4_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9662,12 +9662,12 @@ define void @test_vsuxseg4_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9680,12 +9680,12 @@ define void @test_vsuxseg4_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9695,12 +9695,12 @@ 
define void @test_vsuxseg4_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9713,13 +9713,13 @@ define void @test_vsuxseg5_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9729,13 +9729,13 @@ define void @test_vsuxseg5_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9748,13 +9748,13 @@ define void @test_vsuxseg5_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9764,13 +9764,13 @@ define void @test_vsuxseg5_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli 
zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9783,13 +9783,13 @@ define void @test_vsuxseg5_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9799,13 +9799,13 @@ define void @test_vsuxseg5_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9818,14 +9818,14 @@ define void @test_vsuxseg6_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9835,14 +9835,14 @@ define void @test_vsuxseg6_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9855,14 +9855,14 
@@ define void @test_vsuxseg6_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9872,14 +9872,14 @@ define void @test_vsuxseg6_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9892,14 +9892,14 @@ define void @test_vsuxseg6_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9909,14 +9909,14 @@ define void @test_vsuxseg6_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9929,15 +9929,15 @@ define void @test_vsuxseg7_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9947,15 +9947,15 @@ define void @test_vsuxseg7_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9968,15 +9968,15 @@ define void @test_vsuxseg7_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9986,15 +9986,15 @@ define void @test_vsuxseg7_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -10007,15 +10007,15 @@ define void 
@test_vsuxseg7_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -10025,15 +10025,15 @@ define void @test_vsuxseg7_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -10046,16 +10046,16 @@ define void @test_vsuxseg8_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -10065,16 +10065,16 @@ define void @test_vsuxseg8_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t 
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -10087,16 +10087,16 @@ define void @test_vsuxseg8_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -10106,16 +10106,16 @@ define void @test_vsuxseg8_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -10128,16 +10128,16 @@ define void @test_vsuxseg8_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -10147,16 +10147,16 @@ define void @test_vsuxseg8_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: 
vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -10262,11 +10262,11 @@ define void @test_vsuxseg3_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, i32 %vl) @@ -10276,11 +10276,11 @@ define void @test_vsuxseg3_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10293,11 +10293,11 @@ define void @test_vsuxseg3_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, i32 %vl) @@ -10307,11 +10307,11 @@ define void @test_vsuxseg3_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10324,11 +10324,11 @@ define void @test_vsuxseg3_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; 
CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, i32 %vl) @@ -10338,11 +10338,11 @@ define void @test_vsuxseg3_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10355,12 +10355,12 @@ define void @test_vsuxseg4_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10370,12 +10370,12 @@ define void @test_vsuxseg4_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10388,12 +10388,12 @@ define void @test_vsuxseg4_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10403,12 +10403,12 @@ define void @test_vsuxseg4_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: 
vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10421,12 +10421,12 @@ define void @test_vsuxseg4_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10436,12 +10436,12 @@ define void @test_vsuxseg4_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10454,13 +10454,13 @@ define void @test_vsuxseg5_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10470,13 +10470,13 @@ define void @test_vsuxseg5_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10489,13 +10489,13 @@ define void @test_vsuxseg5_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; 
CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10505,13 +10505,13 @@ define void @test_vsuxseg5_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10524,13 +10524,13 @@ define void @test_vsuxseg5_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10540,13 +10540,13 @@ define void @test_vsuxseg5_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10559,14 +10559,14 @@ define void @test_vsuxseg6_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), 
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10576,14 +10576,14 @@
 define void @test_vsuxseg6_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10596,14 +10596,14 @@
 define void @test_vsuxseg6_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10613,14 +10613,14 @@
 define void @test_vsuxseg6_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10633,14 +10633,14 @@
 define void @test_vsuxseg6_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10650,14 +10650,14 @@
 define void @test_vsuxseg6_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10670,15 +10670,15 @@
 define void @test_vsuxseg7_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10688,15 +10688,15 @@
 define void @test_vsuxseg7_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10709,15 +10709,15 @@
 define void @test_vsuxseg7_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10727,15 +10727,15 @@
 define void @test_vsuxseg7_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10748,15 +10748,15 @@
 define void @test_vsuxseg7_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10766,15 +10766,15 @@
 define void @test_vsuxseg7_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10787,16 +10787,16 @@
 define void @test_vsuxseg8_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10806,16 +10806,16 @@
 define void @test_vsuxseg8_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10828,16 +10828,16 @@
 define void @test_vsuxseg8_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10847,16 +10847,16 @@
 define void @test_vsuxseg8_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -10869,16 +10869,16 @@
 define void @test_vsuxseg8_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl)
@@ -10888,16 +10888,16 @@
 define void @test_vsuxseg8_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
@@ -11001,11 +11001,11 @@
 define void @test_vsuxseg3_nxv8f16_nxv8i16( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11015,11 +11015,11 @@
 define void @test_vsuxseg3_mask_nxv8f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11032,11 +11032,11 @@
 define void @test_vsuxseg3_nxv8f16_nxv8i8( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11046,11 +11046,11 @@
 define void @test_vsuxseg3_mask_nxv8f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11063,11 +11063,11 @@
 define void @test_vsuxseg3_nxv8f16_nxv8i32( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11077,11 +11077,11 @@
 define void @test_vsuxseg3_mask_nxv8f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11094,12 +11094,12 @@
 define void @test_vsuxseg4_nxv8f16_nxv8i16( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11109,12 +11109,12 @@
 define void @test_vsuxseg4_mask_nxv8f16_nxv8i16( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11127,12 +11127,12 @@
 define void @test_vsuxseg4_nxv8f16_nxv8i8( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11142,12 +11142,12 @@
 define void @test_vsuxseg4_mask_nxv8f16_nxv8i8( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11160,12 +11160,12 @@
 define void @test_vsuxseg4_nxv8f16_nxv8i32( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11175,12 +11175,12 @@
 define void @test_vsuxseg4_mask_nxv8f16_nxv8i32( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11379,11 +11379,11 @@
 define void @test_vsuxseg3_nxv2f64_nxv2i32( %val, double* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, i32 %vl)
@@ -11393,11 +11393,11 @@
 define void @test_vsuxseg3_mask_nxv2f64_nxv2i32( %val, double* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -11410,11 +11410,11 @@
 define void @test_vsuxseg3_nxv2f64_nxv2i8( %val, double* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, i32 %vl)
@@ -11424,11 +11424,11 @@
 define void @test_vsuxseg3_mask_nxv2f64_nxv2i8( %val, double* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -11441,11 +11441,11 @@
 define void @test_vsuxseg3_nxv2f64_nxv2i16( %val, double* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, i32 %vl)
@@ -11455,11 +11455,11 @@
 define void @test_vsuxseg3_mask_nxv2f64_nxv2i16( %val, double* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -11472,12 +11472,12 @@
 define void @test_vsuxseg4_nxv2f64_nxv2i32( %val, double* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -11487,12 +11487,12 @@
 define void @test_vsuxseg4_mask_nxv2f64_nxv2i32( %val, double* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -11505,12 +11505,12 @@
 define void @test_vsuxseg4_nxv2f64_nxv2i8( %val, double* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -11520,12 +11520,12 @@
 define void @test_vsuxseg4_mask_nxv2f64_nxv2i8( %val, double* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -11538,12 +11538,12 @@
 define void @test_vsuxseg4_nxv2f64_nxv2i16( %val, double* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, i32 %vl)
@@ -11553,12 +11553,12 @@
 define void @test_vsuxseg4_mask_nxv2f64_nxv2i16( %val, double* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
@@ -11662,11 +11662,11 @@
 define void @test_vsuxseg3_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11676,11 +11676,11 @@
 define void @test_vsuxseg3_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11693,11 +11693,11 @@
 define void @test_vsuxseg3_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11707,11 +11707,11 @@
 define void @test_vsuxseg3_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11724,11 +11724,11 @@
 define void @test_vsuxseg3_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11738,11 +11738,11 @@
 define void @test_vsuxseg3_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11755,12 +11755,12 @@
 define void @test_vsuxseg4_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11770,12 +11770,12 @@
 define void @test_vsuxseg4_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11788,12 +11788,12 @@
 define void @test_vsuxseg4_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11803,12 +11803,12 @@
 define void @test_vsuxseg4_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11821,12 +11821,12 @@
 define void @test_vsuxseg4_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11836,12 +11836,12 @@
 define void @test_vsuxseg4_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11854,13 +11854,13 @@
 define void @test_vsuxseg5_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11870,13 +11870,13 @@
 define void @test_vsuxseg5_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11889,13 +11889,13 @@
 define void @test_vsuxseg5_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11905,13 +11905,13 @@
 define void @test_vsuxseg5_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11924,13 +11924,13 @@
 define void @test_vsuxseg5_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11940,13 +11940,13 @@
 define void @test_vsuxseg5_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11959,14 +11959,14 @@
 define void @test_vsuxseg6_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -11976,14 +11976,14 @@
 define void @test_vsuxseg6_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -11996,14 +11996,14 @@
 define void @test_vsuxseg6_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -12013,14 +12013,14 @@
 define void @test_vsuxseg6_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -12033,14 +12033,14 @@
 define void @test_vsuxseg6_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -12050,14 +12050,14 @@
 define void @test_vsuxseg6_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -12070,15 +12070,15 @@
 define void @test_vsuxseg7_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -12088,15 +12088,15 @@
 define void @test_vsuxseg7_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -12109,15 +12109,15 @@
 define void @test_vsuxseg7_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -12127,15 +12127,15 @@
 define void @test_vsuxseg7_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -12148,15 +12148,15 @@
 define void @test_vsuxseg7_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -12166,15 +12166,15 @@
 define void @test_vsuxseg7_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -12187,16 +12187,16 @@
 define void @test_vsuxseg8_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -12206,16 +12206,16 @@
 define void @test_vsuxseg8_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -12228,16 +12228,16 @@
 define void @test_vsuxseg8_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl)
@@ -12247,16 +12247,16 @@
 define void @test_vsuxseg8_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
@@ -12269,16 +12269,16 @@
 define void @test_vsuxseg8_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12288,16 +12288,16 @@ define void @test_vsuxseg8_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12403,11 +12403,11 @@ define void @test_vsuxseg3_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, i32 %vl) @@ -12417,11 +12417,11 @@ define void @test_vsuxseg3_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12434,11 +12434,11 @@ define void @test_vsuxseg3_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, i32 %vl) @@ -12448,11 +12448,11 @@ define void @test_vsuxseg3_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: 
vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12465,11 +12465,11 @@ define void @test_vsuxseg3_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, i32 %vl) @@ -12479,11 +12479,11 @@ define void @test_vsuxseg3_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12496,12 +12496,12 @@ define void @test_vsuxseg4_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12511,12 +12511,12 @@ define void @test_vsuxseg4_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12529,12 +12529,12 @@ define void @test_vsuxseg4_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12544,12 +12544,12 @@ define void @test_vsuxseg4_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12562,12 +12562,12 @@ define void @test_vsuxseg4_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12577,12 +12577,12 @@ define void @test_vsuxseg4_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12595,13 +12595,13 @@ define void @test_vsuxseg5_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12611,13 +12611,13 @@ define void @test_vsuxseg5_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: 
vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12630,13 +12630,13 @@ define void @test_vsuxseg5_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12646,13 +12646,13 @@ define void @test_vsuxseg5_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12665,13 +12665,13 @@ define void @test_vsuxseg5_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12681,13 +12681,13 @@ define void @test_vsuxseg5_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, 
%mask, i32 %vl) @@ -12700,14 +12700,14 @@ define void @test_vsuxseg6_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12717,14 +12717,14 @@ define void @test_vsuxseg6_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12737,14 +12737,14 @@ define void @test_vsuxseg6_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12754,14 +12754,14 @@ define void @test_vsuxseg6_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12774,14 +12774,14 @@ define void @test_vsuxseg6_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsuxseg6_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12791,14 +12791,14 @@ define void @test_vsuxseg6_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12811,15 +12811,15 @@ define void @test_vsuxseg7_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12829,15 +12829,15 @@ define void @test_vsuxseg7_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12850,15 +12850,15 @@ define void @test_vsuxseg7_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsuxseg7_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12868,15 +12868,15 @@ define void @test_vsuxseg7_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12889,15 +12889,15 @@ define void @test_vsuxseg7_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12907,15 +12907,15 @@ define void @test_vsuxseg7_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ 
-12928,16 +12928,16 @@ define void @test_vsuxseg8_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12947,16 +12947,16 @@ define void @test_vsuxseg8_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12969,16 +12969,16 @@ define void @test_vsuxseg8_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12988,16 +12988,16 @@ define void @test_vsuxseg8_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v 
v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -13010,16 +13010,16 @@ define void @test_vsuxseg8_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -13029,16 +13029,16 @@ define void @test_vsuxseg8_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -13144,11 +13144,11 @@ define void @test_vsuxseg3_nxv4f32_nxv4i16( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, i32 %vl) @@ -13158,11 +13158,11 @@ define void @test_vsuxseg3_mask_nxv4f32_nxv4i16( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: 
tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13175,11 +13175,11 @@ define void @test_vsuxseg3_nxv4f32_nxv4i8( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, i32 %vl) @@ -13189,11 +13189,11 @@ define void @test_vsuxseg3_mask_nxv4f32_nxv4i8( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13206,11 +13206,11 @@ define void @test_vsuxseg3_nxv4f32_nxv4i32( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, i32 %vl) @@ -13220,11 +13220,11 @@ define void @test_vsuxseg3_mask_nxv4f32_nxv4i32( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13237,12 +13237,12 @@ define void @test_vsuxseg4_nxv4f32_nxv4i16( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -13252,12 +13252,12 @@ define void @test_vsuxseg4_mask_nxv4f32_nxv4i16( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: 
test_vsuxseg4_mask_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13270,12 +13270,12 @@ define void @test_vsuxseg4_nxv4f32_nxv4i8( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -13285,12 +13285,12 @@ define void @test_vsuxseg4_mask_nxv4f32_nxv4i8( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13303,12 +13303,12 @@ define void @test_vsuxseg4_nxv4f32_nxv4i32( %val, float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -13318,12 +13318,12 @@ define void @test_vsuxseg4_mask_nxv4f32_nxv4i32( %val, float* %base, %index, %mask, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll 
b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll @@ -221,11 +221,11 @@ define void @test_vsuxseg3_nxv4i32_nxv4i32( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -235,11 +235,11 @@ define void @test_vsuxseg3_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -252,11 +252,11 @@ define void @test_vsuxseg3_nxv4i32_nxv4i8( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -266,11 +266,11 @@ define void @test_vsuxseg3_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -283,11 +283,11 @@ define void @test_vsuxseg3_nxv4i32_nxv4i64( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v12 +; CHECK-NEXT: vsuxseg3ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -297,11 +297,11 @@ define void @test_vsuxseg3_mask_nxv4i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, 
v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei64.v v2, (a0), v12, v0.t +; CHECK-NEXT: vsuxseg3ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -314,11 +314,11 @@ define void @test_vsuxseg3_nxv4i32_nxv4i16( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -328,11 +328,11 @@ define void @test_vsuxseg3_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -345,12 +345,12 @@ define void @test_vsuxseg4_nxv4i32_nxv4i32( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -360,12 +360,12 @@ define void @test_vsuxseg4_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -378,12 +378,12 @@ define void @test_vsuxseg4_nxv4i32_nxv4i8( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; 
CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -393,12 +393,12 @@ define void @test_vsuxseg4_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -411,12 +411,12 @@ define void @test_vsuxseg4_nxv4i32_nxv4i64( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v12 +; CHECK-NEXT: vsuxseg4ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -426,12 +426,12 @@ define void @test_vsuxseg4_mask_nxv4i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei64.v v2, (a0), v12, v0.t +; CHECK-NEXT: vsuxseg4ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -444,12 +444,12 @@ define void @test_vsuxseg4_nxv4i32_nxv4i16( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -459,12 +459,12 @@ define void @test_vsuxseg4_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; 
CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -566,11 +566,11 @@ define void @test_vsuxseg3_nxv16i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v12 +; CHECK-NEXT: vsuxseg3ei16.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -580,11 +580,11 @@ define void @test_vsuxseg3_mask_nxv16i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v12, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -597,11 +597,11 @@ define void @test_vsuxseg3_nxv16i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -611,11 +611,11 @@ define void @test_vsuxseg3_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -659,12 +659,12 @@ define void @test_vsuxseg4_nxv16i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v12 +; CHECK-NEXT: vsuxseg4ei16.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* 
%base, %index, i64 %vl) @@ -674,12 +674,12 @@ define void @test_vsuxseg4_mask_nxv16i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v12, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -692,12 +692,12 @@ define void @test_vsuxseg4_nxv16i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -707,12 +707,12 @@ define void @test_vsuxseg4_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -882,11 +882,11 @@ define void @test_vsuxseg3_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i64( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -896,11 +896,11 @@ define void @test_vsuxseg3_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -913,11 +913,11 @@ define void @test_vsuxseg3_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl) { 
; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i32( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -927,11 +927,11 @@ define void @test_vsuxseg3_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -944,11 +944,11 @@ define void @test_vsuxseg3_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i16( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -958,11 +958,11 @@ define void @test_vsuxseg3_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -975,11 +975,11 @@ define void @test_vsuxseg3_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i8( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -989,11 +989,11 @@ define void @test_vsuxseg3_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; 
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1006,12 +1006,12 @@
define void @test_vsuxseg4_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1021,12 +1021,12 @@
define void @test_vsuxseg4_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1039,12 +1039,12 @@
define void @test_vsuxseg4_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1054,12 +1054,12 @@
define void @test_vsuxseg4_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1072,12 +1072,12 @@
define void @test_vsuxseg4_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1087,12 +1087,12 @@
define void @test_vsuxseg4_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1105,12 +1105,12 @@
define void @test_vsuxseg4_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1120,12 +1120,12 @@
define void @test_vsuxseg4_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1138,13 +1138,13 @@
define void @test_vsuxseg5_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1154,13 +1154,13 @@
define void @test_vsuxseg5_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1173,13 +1173,13 @@
define void @test_vsuxseg5_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1189,13 +1189,13 @@
define void @test_vsuxseg5_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1208,13 +1208,13 @@
define void @test_vsuxseg5_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1224,13 +1224,13 @@
define void @test_vsuxseg5_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1243,13 +1243,13 @@
define void @test_vsuxseg5_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1259,13 +1259,13 @@
define void @test_vsuxseg5_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1278,14 +1278,14 @@
define void @test_vsuxseg6_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1295,14 +1295,14 @@
define void @test_vsuxseg6_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1315,14 +1315,14 @@
define void @test_vsuxseg6_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1332,14 +1332,14 @@
define void @test_vsuxseg6_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1352,14 +1352,14 @@
define void @test_vsuxseg6_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1369,14 +1369,14 @@
define void @test_vsuxseg6_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1389,14 +1389,14 @@
define void @test_vsuxseg6_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1406,14 +1406,14 @@
define void @test_vsuxseg6_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1426,15 +1426,15 @@
define void @test_vsuxseg7_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1444,15 +1444,15 @@
define void @test_vsuxseg7_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1465,15 +1465,15 @@
define void @test_vsuxseg7_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1483,15 +1483,15 @@
define void @test_vsuxseg7_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1504,15 +1504,15 @@
define void @test_vsuxseg7_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1522,15 +1522,15 @@
define void @test_vsuxseg7_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1543,15 +1543,15 @@
define void @test_vsuxseg7_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1561,15 +1561,15 @@
define void @test_vsuxseg7_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1582,16 +1582,16 @@
define void @test_vsuxseg8_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1601,16 +1601,16 @@
define void @test_vsuxseg8_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1623,16 +1623,16 @@
define void @test_vsuxseg8_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1642,16 +1642,16 @@
define void @test_vsuxseg8_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1664,16 +1664,16 @@
define void @test_vsuxseg8_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1683,16 +1683,16 @@
define void @test_vsuxseg8_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1705,16 +1705,16 @@
define void @test_vsuxseg8_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl)
@@ -1724,16 +1724,16 @@
define void @test_vsuxseg8_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
@@ -1870,11 +1870,11 @@
define void @test_vsuxseg3_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i64( %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -1884,11 +1884,11 @@
define void @test_vsuxseg3_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -1901,11 +1901,11 @@
define void @test_vsuxseg3_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -1915,11 +1915,11 @@
define void @test_vsuxseg3_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -1932,11 +1932,11 @@
define void @test_vsuxseg3_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -1946,11 +1946,11 @@
define void @test_vsuxseg3_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -1963,11 +1963,11 @@
define void @test_vsuxseg3_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -1977,11 +1977,11 @@
define void @test_vsuxseg3_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -1994,12 +1994,12 @@
define void @test_vsuxseg4_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -2009,12 +2009,12 @@
define void @test_vsuxseg4_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -2027,12 +2027,12 @@
define void @test_vsuxseg4_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -2042,12 +2042,12 @@
define void @test_vsuxseg4_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -2060,12 +2060,12 @@
define void @test_vsuxseg4_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -2075,12 +2075,12 @@
define void @test_vsuxseg4_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -2093,12 +2093,12 @@
define void @test_vsuxseg4_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -2108,12 +2108,12 @@
define void @test_vsuxseg4_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -2126,13 +2126,13 @@
define void @test_vsuxseg5_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -2142,13 +2142,13 @@
define void @test_vsuxseg5_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -2161,13 +2161,13 @@
define void @test_vsuxseg5_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -2177,13 +2177,13 @@
define void @test_vsuxseg5_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -2196,13 +2196,13 @@
define void @test_vsuxseg5_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -2212,13 +2212,13 @@
define void @test_vsuxseg5_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -2231,13 +2231,13 @@
define void @test_vsuxseg5_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -2247,13 +2247,13 @@
define void @test_vsuxseg5_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -2266,14 +2266,14 @@
define void @test_vsuxseg6_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -2283,14 +2283,14 @@
define void @test_vsuxseg6_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -2303,14 +2303,14 @@
define void @test_vsuxseg6_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -2320,14 +2320,14 @@
define void @test_vsuxseg6_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -2340,14 +2340,14 @@
define void @test_vsuxseg6_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -2357,14 +2357,14 @@
define void @test_vsuxseg6_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -2377,14 +2377,14 @@
define void @test_vsuxseg6_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -2394,14 +2394,14 @@
define void @test_vsuxseg6_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -2414,15 +2414,15 @@
define void @test_vsuxseg7_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -2432,15 +2432,15 @@
define void @test_vsuxseg7_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -2453,15 +2453,15 @@
define void @test_vsuxseg7_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2471,15 +2471,15 @@ define void @test_vsuxseg7_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2492,15 +2492,15 @@ define void @test_vsuxseg7_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2510,15 +2510,15 @@ define void @test_vsuxseg7_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2531,15 +2531,15 @@ define void @test_vsuxseg7_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: 
vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2549,15 +2549,15 @@ define void @test_vsuxseg7_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2570,16 +2570,16 @@ define void @test_vsuxseg8_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2589,16 +2589,16 @@ define void @test_vsuxseg8_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2611,16 +2611,16 @@ define void @test_vsuxseg8_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v 
v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2630,16 +2630,16 @@ define void @test_vsuxseg8_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2652,16 +2652,16 @@ define void @test_vsuxseg8_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2671,16 +2671,16 @@ define void @test_vsuxseg8_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2693,16 +2693,16 @@ define void @test_vsuxseg8_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2712,16 +2712,16 @@ define void @test_vsuxseg8_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2854,11 +2854,11 @@ define void @test_vsuxseg3_nxv8i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -2868,11 +2868,11 @@ define void @test_vsuxseg3_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -2885,11 +2885,11 @@ define void @test_vsuxseg3_nxv8i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -2899,11 +2899,11 @@
define void @test_vsuxseg3_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -2947,11 +2947,11 @@
define void @test_vsuxseg3_nxv8i16_nxv8i32( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -2961,11 +2961,11 @@
define void @test_vsuxseg3_mask_nxv8i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -2978,12 +2978,12 @@
define void @test_vsuxseg4_nxv8i16_nxv8i16( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -2993,12 +2993,12 @@
define void @test_vsuxseg4_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -3011,12 +3011,12 @@
define void @test_vsuxseg4_nxv8i16_nxv8i8( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -3026,12 +3026,12 @@
define void @test_vsuxseg4_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -3077,12 +3077,12 @@
define void @test_vsuxseg4_nxv8i16_nxv8i32( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -3092,12 +3092,12 @@
define void @test_vsuxseg4_mask_nxv8i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -3230,11 +3230,11 @@
define void @test_vsuxseg3_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3244,11 +3244,11 @@
define void @test_vsuxseg3_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3261,11 +3261,11 @@
define void @test_vsuxseg3_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3275,11 +3275,11 @@
define void @test_vsuxseg3_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3323,11 +3323,11 @@
define void @test_vsuxseg3_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3337,11 +3337,11 @@
define void @test_vsuxseg3_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3354,12 +3354,12 @@
define void @test_vsuxseg4_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3369,12 +3369,12 @@
define void @test_vsuxseg4_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3387,12 +3387,12 @@
define void @test_vsuxseg4_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3402,12 +3402,12 @@
define void @test_vsuxseg4_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3453,12 +3453,12 @@
define void @test_vsuxseg4_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3468,12 +3468,12 @@
define void @test_vsuxseg4_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3486,13 +3486,13 @@
define void @test_vsuxseg5_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3502,13 +3502,13 @@
define void @test_vsuxseg5_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3521,13 +3521,13 @@
define void @test_vsuxseg5_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3537,13 +3537,13 @@
define void @test_vsuxseg5_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3556,13 +3556,13 @@
define void @test_vsuxseg5_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3572,13 +3572,13 @@
define void @test_vsuxseg5_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3591,13 +3591,13 @@
define void @test_vsuxseg5_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3607,13 +3607,13 @@
define void @test_vsuxseg5_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3626,14 +3626,14 @@
define void @test_vsuxseg6_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3643,14 +3643,14 @@
define void @test_vsuxseg6_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3663,14 +3663,14 @@
define void @test_vsuxseg6_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3680,14 +3680,14 @@
define void @test_vsuxseg6_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3700,14 +3700,14 @@
define void @test_vsuxseg6_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3717,14 +3717,14 @@
define void @test_vsuxseg6_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3737,14 +3737,14 @@
define void @test_vsuxseg6_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3754,14 +3754,14 @@
define void @test_vsuxseg6_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3774,15 +3774,15 @@
define void @test_vsuxseg7_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3792,15 +3792,15 @@
define void @test_vsuxseg7_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3813,15 +3813,15 @@
define void @test_vsuxseg7_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3831,15 +3831,15 @@
define void @test_vsuxseg7_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3852,15 +3852,15 @@
define void @test_vsuxseg7_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3870,15 +3870,15 @@
define void @test_vsuxseg7_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3891,15 +3891,15 @@
define void @test_vsuxseg7_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3909,15 +3909,15 @@
define void @test_vsuxseg7_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3930,16 +3930,16 @@
define void @test_vsuxseg8_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3949,16 +3949,16 @@
define void @test_vsuxseg8_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -3971,16 +3971,16 @@
define void @test_vsuxseg8_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -3990,16 +3990,16 @@
define void @test_vsuxseg8_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -4012,16 +4012,16 @@
define void @test_vsuxseg8_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v23, v16
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -4031,16 +4031,16 @@
define void @test_vsuxseg8_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v23, v16
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -4053,16 +4053,16 @@
define void @test_vsuxseg8_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -4072,16 +4072,16 @@
define void @test_vsuxseg8_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -4218,11 +4218,11 @@
define void @test_vsuxseg3_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i64( %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4232,11 +4232,11 @@
define void @test_vsuxseg3_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4249,11 +4249,11 @@
define void @test_vsuxseg3_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4263,11 +4263,11 @@
define void @test_vsuxseg3_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4280,11 +4280,11 @@
define void @test_vsuxseg3_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4294,11 +4294,11 @@
define void @test_vsuxseg3_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4311,11 +4311,11 @@
define void @test_vsuxseg3_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4325,11 +4325,11 @@
define void @test_vsuxseg3_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4342,12 +4342,12 @@
define void @test_vsuxseg4_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -4357,12 +4357,12 @@
define void @test_vsuxseg4_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -4375,12 +4375,12 @@
define void @test_vsuxseg4_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v
v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4390,12 +4390,12 @@ define void @test_vsuxseg4_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4408,12 +4408,12 @@ define void @test_vsuxseg4_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4423,12 +4423,12 @@ define void @test_vsuxseg4_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4441,12 +4441,12 @@ define void @test_vsuxseg4_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4456,12 +4456,12 @@ define void @test_vsuxseg4_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v 
v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4474,13 +4474,13 @@ define void @test_vsuxseg5_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4490,13 +4490,13 @@ define void @test_vsuxseg5_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4509,13 +4509,13 @@ define void @test_vsuxseg5_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4525,13 +4525,13 @@ define void @test_vsuxseg5_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4544,13 +4544,13 @@ define void @test_vsuxseg5_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl) 
{ ; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4560,13 +4560,13 @@ define void @test_vsuxseg5_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4579,13 +4579,13 @@ define void @test_vsuxseg5_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4595,13 +4595,13 @@ define void @test_vsuxseg5_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4614,14 +4614,14 @@ define void @test_vsuxseg6_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, 
v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4631,14 +4631,14 @@ define void @test_vsuxseg6_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4651,14 +4651,14 @@ define void @test_vsuxseg6_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4668,14 +4668,14 @@ define void @test_vsuxseg6_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4688,14 +4688,14 @@ define void @test_vsuxseg6_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret 
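; For the unmasked forms the arithmetic behind the chosen group can be
; checked by hand: NF = 6 at LMUL = 1 needs six consecutive vector registers,
; and starting at v10 yields v10..v15, which keeps v0 free for a mask and
; avoids v8 (%val) and v9 (%index). A matching sketch with the elided types
; restored (an assumption reconstructed from the nxv1i16 naming;
; @sketch_vsuxseg6 is a hypothetical name):
;
;   declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16(
;     <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>,
;     <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>,
;     i16*, <vscale x 1 x i16>, i64)
;
;   define void @sketch_vsuxseg6(<vscale x 1 x i16> %val, i16* %base,
;       <vscale x 1 x i16> %index, i64 %vl) {
;   entry:
;     tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16(
;       <vscale x 1 x i16> %val, <vscale x 1 x i16> %val,
;       <vscale x 1 x i16> %val, <vscale x 1 x i16> %val,
;       <vscale x 1 x i16> %val, <vscale x 1 x i16> %val,
;       i16* %base, <vscale x 1 x i16> %index, i64 %vl)
;     ret void
;   }
;
; Further down, in the nxv2i32 tests whose index type is nxv2i64, the index
; itself occupies an LMUL=2 pair (v10-v11), so the copies there start at v12
; instead of v10.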
entry: tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4705,14 +4705,14 @@ define void @test_vsuxseg6_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4725,14 +4725,14 @@ define void @test_vsuxseg6_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4742,14 +4742,14 @@ define void @test_vsuxseg6_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4762,15 +4762,15 @@ define void @test_vsuxseg7_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* 
%base, %index, i64 %vl) @@ -4780,15 +4780,15 @@ define void @test_vsuxseg7_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4801,15 +4801,15 @@ define void @test_vsuxseg7_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4819,15 +4819,15 @@ define void @test_vsuxseg7_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4840,15 +4840,15 @@ define void @test_vsuxseg7_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: 
ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4858,15 +4858,15 @@ define void @test_vsuxseg7_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4879,15 +4879,15 @@ define void @test_vsuxseg7_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4897,15 +4897,15 @@ define void @test_vsuxseg7_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4918,16 +4918,16 @@ define void @test_vsuxseg8_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: 
vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4937,16 +4937,16 @@ define void @test_vsuxseg8_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4959,16 +4959,16 @@ define void @test_vsuxseg8_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4978,16 +4978,16 @@ define void @test_vsuxseg8_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -5000,16 +5000,16 @@ define void @test_vsuxseg8_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; 
CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -5019,16 +5019,16 @@ define void @test_vsuxseg8_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -5041,16 +5041,16 @@ define void @test_vsuxseg8_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -5060,16 +5060,16 @@ define void @test_vsuxseg8_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -5204,11 +5204,11 @@ define void @test_vsuxseg3_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5218,11 +5218,11 @@ define void @test_vsuxseg3_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5235,11 +5235,11 @@ define void @test_vsuxseg3_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5249,11 +5249,11 @@ define void @test_vsuxseg3_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5266,11 +5266,11 @@ define void @test_vsuxseg3_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5280,11 +5280,11 @@ define void @test_vsuxseg3_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: 
vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5297,11 +5297,11 @@ define void @test_vsuxseg3_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5311,11 +5311,11 @@ define void @test_vsuxseg3_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5328,12 +5328,12 @@ define void @test_vsuxseg4_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5343,12 +5343,12 @@ define void @test_vsuxseg4_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5361,12 +5361,12 @@ define void @test_vsuxseg4_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v 
v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5376,12 +5376,12 @@ define void @test_vsuxseg4_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5394,12 +5394,12 @@ define void @test_vsuxseg4_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5409,12 +5409,12 @@ define void @test_vsuxseg4_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5427,12 +5427,12 @@ define void @test_vsuxseg4_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5442,12 +5442,12 @@ define void @test_vsuxseg4_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, 
v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5460,13 +5460,13 @@ define void @test_vsuxseg5_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5476,13 +5476,13 @@ define void @test_vsuxseg5_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5495,13 +5495,13 @@ define void @test_vsuxseg5_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5511,13 +5511,13 @@ define void @test_vsuxseg5_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5530,13 +5530,13 @@ define void @test_vsuxseg5_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i16: 
; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5546,13 +5546,13 @@ define void @test_vsuxseg5_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5565,13 +5565,13 @@ define void @test_vsuxseg5_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5581,13 +5581,13 @@ define void @test_vsuxseg5_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5600,14 +5600,14 @@ define void @test_vsuxseg6_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, 
e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5617,14 +5617,14 @@
define void @test_vsuxseg6_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5637,14 +5637,14 @@
define void @test_vsuxseg6_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5654,14 +5654,14 @@
define void @test_vsuxseg6_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5674,14 +5674,14 @@
define void @test_vsuxseg6_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5691,14 +5691,14 @@
define void @test_vsuxseg6_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5711,14 +5711,14 @@
define void @test_vsuxseg6_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5728,14 +5728,14 @@
define void @test_vsuxseg6_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5748,15 +5748,15 @@
define void @test_vsuxseg7_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5766,15 +5766,15 @@
define void @test_vsuxseg7_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5787,15 +5787,15 @@
define void @test_vsuxseg7_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5805,15 +5805,15 @@
define void @test_vsuxseg7_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5826,15 +5826,15 @@
define void @test_vsuxseg7_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5844,15 +5844,15 @@
define void @test_vsuxseg7_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5865,15 +5865,15 @@
define void @test_vsuxseg7_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5883,15 +5883,15 @@
define void @test_vsuxseg7_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5904,16 +5904,16 @@
define void @test_vsuxseg8_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5923,16 +5923,16 @@
define void @test_vsuxseg8_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5945,16 +5945,16 @@
define void @test_vsuxseg8_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -5964,16 +5964,16 @@
define void @test_vsuxseg8_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -5986,16 +5986,16 @@
define void @test_vsuxseg8_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -6005,16 +6005,16 @@
define void @test_vsuxseg8_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -6027,16 +6027,16 @@
define void @test_vsuxseg8_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl)
@@ -6046,16 +6046,16 @@
define void @test_vsuxseg8_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
@@ -6186,11 +6186,11 @@
define void @test_vsuxseg3_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -6200,11 +6200,11 @@
define void @test_vsuxseg3_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -6217,11 +6217,11 @@
define void @test_vsuxseg3_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -6231,11 +6231,11 @@
define void @test_vsuxseg3_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -6310,12 +6310,12 @@
define void @test_vsuxseg4_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -6325,12 +6325,12 @@
define void @test_vsuxseg4_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -6343,12 +6343,12 @@
define void @test_vsuxseg4_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -6358,12 +6358,12 @@
define void @test_vsuxseg4_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -6442,13 +6442,13 @@
define void @test_vsuxseg5_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg5ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -6458,13 +6458,13 @@
define void @test_vsuxseg5_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg5ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -6477,13 +6477,13 @@
define void @test_vsuxseg5_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -6493,13 +6493,13 @@
define void @test_vsuxseg5_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -6547,13 +6547,13 @@
define void @test_vsuxseg5_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg5ei32.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -6563,13 +6563,13 @@
define void @test_vsuxseg5_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -6582,14 +6582,14 @@
define void @test_vsuxseg6_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg6ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -6599,14 +6599,14 @@
define void @test_vsuxseg6_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg6ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -6619,14 +6619,14 @@
define void @test_vsuxseg6_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -6636,14 +6636,14 @@
define void @test_vsuxseg6_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -6693,14 +6693,14 @@
define void @test_vsuxseg6_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg6ei32.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -6710,14 +6710,14 @@
define void @test_vsuxseg6_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -6730,15 +6730,15 @@
define void @test_vsuxseg7_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg7ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -6748,15 +6748,15 @@
define void @test_vsuxseg7_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg7ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -6769,15 +6769,15 @@
define void @test_vsuxseg7_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -6787,15 +6787,15 @@
define void @test_vsuxseg7_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -6847,15 +6847,15 @@
define void @test_vsuxseg7_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg7ei32.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -6865,15 +6865,15 @@
define void @test_vsuxseg7_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg7ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -6886,16 +6886,16 @@
define void @test_vsuxseg8_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg8ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -6905,16 +6905,16 @@
define void @test_vsuxseg8_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg8ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -6927,16 +6927,16 @@
define void @test_vsuxseg8_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -6946,16 +6946,16 @@
define void @test_vsuxseg8_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -7009,16 +7009,16 @@
define void @test_vsuxseg8_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v23, v16
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg8ei32.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -7028,16 +7028,16 @@
define void @test_vsuxseg8_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v23, v16
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg8ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -7294,11 +7294,11 @@
define void @test_vsuxseg3_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -7308,11 +7308,11 @@
define void @test_vsuxseg3_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -7325,11 +7325,11 @@
define void @test_vsuxseg3_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -7339,11 +7339,11 @@
define void @test_vsuxseg3_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -7387,11 +7387,11 @@
define void @test_vsuxseg3_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -7401,11 +7401,11 @@
define void @test_vsuxseg3_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -7418,12 +7418,12 @@
define void @test_vsuxseg4_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -7433,12 +7433,12 @@
define void @test_vsuxseg4_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -7451,12 +7451,12 @@
define void @test_vsuxseg4_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -7466,12 +7466,12 @@
define void @test_vsuxseg4_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -7517,12 +7517,12 @@
define void @test_vsuxseg4_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -7532,12 +7532,12 @@
define void @test_vsuxseg4_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -7550,13 +7550,13 @@
define void @test_vsuxseg5_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -7566,13 +7566,13 @@
define void @test_vsuxseg5_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -7585,13 +7585,13 @@
define void @test_vsuxseg5_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -7601,13 +7601,13 @@
define void @test_vsuxseg5_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -7620,13 +7620,13 @@
define void @test_vsuxseg5_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -7636,13 +7636,13 @@
define void @test_vsuxseg5_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -7655,13 +7655,13 @@
define void @test_vsuxseg5_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -7671,13 +7671,13 @@
define void @test_vsuxseg5_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -7690,14 +7690,14 @@
define void @test_vsuxseg6_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -7707,14 +7707,14 @@
define void @test_vsuxseg6_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -7727,14 +7727,14 @@
define void @test_vsuxseg6_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -7744,14 +7744,14 @@
define void @test_vsuxseg6_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -7764,14 +7764,14 @@
define void @test_vsuxseg6_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -7781,14 +7781,14 @@
define void @test_vsuxseg6_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -7801,14 +7801,14 @@
define void @test_vsuxseg6_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -7818,14 +7818,14 @@
define void @test_vsuxseg6_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -7838,15 +7838,15 @@
define void @test_vsuxseg7_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -7856,15 +7856,15 @@
define void @test_vsuxseg7_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -7877,15 +7877,15 @@
define void @test_vsuxseg7_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v
v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7895,15 +7895,15 @@ define void @test_vsuxseg7_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7916,15 +7916,15 @@ define void @test_vsuxseg7_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v12 +; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7934,15 +7934,15 @@ define void @test_vsuxseg7_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7955,15 +7955,15 @@ define void @test_vsuxseg7_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg7_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7973,15 +7973,15 @@ define void @test_vsuxseg7_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7994,16 +7994,16 @@ define void @test_vsuxseg8_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -8013,16 +8013,16 @@ define void @test_vsuxseg8_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -8035,16 +8035,16 @@ define void @test_vsuxseg8_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -8054,16 +8054,16 @@ define void @test_vsuxseg8_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -8076,16 +8076,16 @@ define void @test_vsuxseg8_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v12 +; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -8095,16 +8095,16 @@ define void @test_vsuxseg8_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; 
CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -8117,16 +8117,16 @@ define void @test_vsuxseg8_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -8136,16 +8136,16 @@ define void @test_vsuxseg8_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -8282,11 +8282,11 @@ define void @test_vsuxseg3_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i64( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8296,11 +8296,11 @@ define void @test_vsuxseg3_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), 
v9, v0.t +; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8313,11 +8313,11 @@ define void @test_vsuxseg3_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8327,11 +8327,11 @@ define void @test_vsuxseg3_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8344,11 +8344,11 @@ define void @test_vsuxseg3_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8358,11 +8358,11 @@ define void @test_vsuxseg3_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8375,11 +8375,11 @@ define void @test_vsuxseg3_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8389,11 +8389,11 @@ define void @test_vsuxseg3_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8406,12 +8406,12 @@ define void @test_vsuxseg4_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8421,12 +8421,12 @@ define void @test_vsuxseg4_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8439,12 +8439,12 @@ define void @test_vsuxseg4_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8454,12 +8454,12 @@ define void @test_vsuxseg4_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8472,12 +8472,12 @@ define void @test_vsuxseg4_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; 
CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8487,12 +8487,12 @@ define void @test_vsuxseg4_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8505,12 +8505,12 @@ define void @test_vsuxseg4_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8520,12 +8520,12 @@ define void @test_vsuxseg4_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8538,13 +8538,13 @@ define void @test_vsuxseg5_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8554,13 +8554,13 @@ define void @test_vsuxseg5_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 
-; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8573,13 +8573,13 @@ define void @test_vsuxseg5_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8589,13 +8589,13 @@ define void @test_vsuxseg5_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8608,13 +8608,13 @@ define void @test_vsuxseg5_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8624,13 +8624,13 @@ define void @test_vsuxseg5_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8643,13 +8643,13 @@ define void @test_vsuxseg5_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8659,13 +8659,13 @@ define void @test_vsuxseg5_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8678,14 +8678,14 @@ define void @test_vsuxseg6_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8695,14 +8695,14 @@ define void @test_vsuxseg6_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8715,14 +8715,14 @@ define void @test_vsuxseg6_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: 
vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8732,14 +8732,14 @@ define void @test_vsuxseg6_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8752,14 +8752,14 @@ define void @test_vsuxseg6_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8769,14 +8769,14 @@ define void @test_vsuxseg6_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8789,14 +8789,14 @@ define void @test_vsuxseg6_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: 
vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8806,14 +8806,14 @@ define void @test_vsuxseg6_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8826,15 +8826,15 @@ define void @test_vsuxseg7_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8844,15 +8844,15 @@ define void @test_vsuxseg7_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8865,15 +8865,15 @@ define void @test_vsuxseg7_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; 
CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8883,15 +8883,15 @@ define void @test_vsuxseg7_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8904,15 +8904,15 @@ define void @test_vsuxseg7_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8922,15 +8922,15 @@ define void @test_vsuxseg7_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8943,15 +8943,15 @@ define void @test_vsuxseg7_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; 
CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8961,15 +8961,15 @@ define void @test_vsuxseg7_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8982,16 +8982,16 @@ define void @test_vsuxseg8_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9001,16 +9001,16 @@ define void @test_vsuxseg8_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9023,16 +9023,16 @@ define void @test_vsuxseg8_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9042,16 +9042,16 @@ define void @test_vsuxseg8_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9064,16 +9064,16 @@ define void @test_vsuxseg8_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9083,16 +9083,16 @@ define void @test_vsuxseg8_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: 
vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9105,16 +9105,16 @@ define void @test_vsuxseg8_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9124,16 +9124,16 @@ define void @test_vsuxseg8_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9268,11 +9268,11 @@ define void @test_vsuxseg3_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9282,11 +9282,11 @@ define void @test_vsuxseg3_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9299,11 +9299,11 @@ define void @test_vsuxseg3_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9313,11 +9313,11 @@ define void @test_vsuxseg3_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9330,11 +9330,11 @@ define void @test_vsuxseg3_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9344,11 +9344,11 @@ define void @test_vsuxseg3_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9361,11 +9361,11 @@ define void @test_vsuxseg3_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64( %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9375,11 +9375,11 @@ define void @test_vsuxseg3_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9392,12 +9392,12 @@ define void @test_vsuxseg4_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9407,12 +9407,12 @@ define void @test_vsuxseg4_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9425,12 +9425,12 @@ define void @test_vsuxseg4_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9440,12 +9440,12 @@ define void @test_vsuxseg4_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9458,12 +9458,12 @@ define void @test_vsuxseg4_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9473,12 +9473,12 @@ define void @test_vsuxseg4_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9491,12 +9491,12 @@ define void @test_vsuxseg4_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9506,12 +9506,12 @@ define void @test_vsuxseg4_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9524,13 +9524,13 @@ define void @test_vsuxseg5_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9540,13 +9540,13 @@ define void @test_vsuxseg5_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9559,13 +9559,13 @@ define void @test_vsuxseg5_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9575,13 +9575,13 @@ define void @test_vsuxseg5_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9594,13 +9594,13 @@ define void @test_vsuxseg5_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9610,13 +9610,13 @@ define void @test_vsuxseg5_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9629,13 +9629,13 @@ define void @test_vsuxseg5_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9645,13 +9645,13 @@ define void @test_vsuxseg5_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9664,14 +9664,14 @@ define void @test_vsuxseg6_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9681,14 +9681,14 @@ define void @test_vsuxseg6_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9701,14 +9701,14 @@ define void @test_vsuxseg6_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9718,14 +9718,14 @@ define void @test_vsuxseg6_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9738,14 +9738,14 @@ define void @test_vsuxseg6_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9755,14 +9755,14 @@ define void @test_vsuxseg6_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9775,14 +9775,14 @@ define void @test_vsuxseg6_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9792,14 +9792,14 @@ define void @test_vsuxseg6_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9812,15 +9812,15 @@ define void @test_vsuxseg7_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9830,15 +9830,15 @@ define void @test_vsuxseg7_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9851,15 +9851,15 @@ define void @test_vsuxseg7_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9869,15 +9869,15 @@ define void @test_vsuxseg7_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9890,15 +9890,15 @@ define void @test_vsuxseg7_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9908,15 +9908,15 @@ define void @test_vsuxseg7_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9929,15 +9929,15 @@ define void @test_vsuxseg7_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9947,15 +9947,15 @@ define void @test_vsuxseg7_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -9968,16 +9968,16 @@ define void @test_vsuxseg8_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -9987,16 +9987,16 @@ define void @test_vsuxseg8_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -10009,16 +10009,16 @@ define void @test_vsuxseg8_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -10028,16 +10028,16 @@ define void @test_vsuxseg8_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -10050,16 +10050,16 @@ define void @test_vsuxseg8_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -10069,16 +10069,16 @@ define void @test_vsuxseg8_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -10091,16 +10091,16 @@ define void @test_vsuxseg8_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl)
@@ -10110,16 +10110,16 @@ define void @test_vsuxseg8_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl)
@@ -10436,11 +10436,11 @@ define void @test_vsuxseg3_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -10450,11 +10450,11 @@ define void @test_vsuxseg3_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -10467,11 +10467,11 @@ define void @test_vsuxseg3_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -10481,11 +10481,11 @@ define void @test_vsuxseg3_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -10498,11 +10498,11 @@ define void @test_vsuxseg3_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -10512,11 +10512,11 @@ define void @test_vsuxseg3_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -10529,11 +10529,11 @@ define void @test_vsuxseg3_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64( %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -10543,11 +10543,11 @@ define void @test_vsuxseg3_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -10560,12 +10560,12 @@ define void @test_vsuxseg4_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -10575,12 +10575,12 @@ define void @test_vsuxseg4_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -10593,12 +10593,12 @@ define void @test_vsuxseg4_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -10608,12 +10608,12 @@ define void @test_vsuxseg4_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -10626,12 +10626,12 @@ define void @test_vsuxseg4_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -10641,12 +10641,12 @@ define void @test_vsuxseg4_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -10659,12 +10659,12 @@ define void @test_vsuxseg4_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -10674,12 +10674,12 @@ define void @test_vsuxseg4_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -10692,13 +10692,13 @@ define void @test_vsuxseg5_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -10708,13 +10708,13 @@ define void @test_vsuxseg5_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -10727,13 +10727,13 @@ define void @test_vsuxseg5_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -10743,13 +10743,13 @@ define void @test_vsuxseg5_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -10762,13 +10762,13 @@ define void @test_vsuxseg5_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -10778,13 +10778,13 @@ define void @test_vsuxseg5_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -10797,13 +10797,13 @@ define void @test_vsuxseg5_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -10813,13 +10813,13 @@ define void @test_vsuxseg5_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -10832,14 +10832,14 @@ define void @test_vsuxseg6_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -10849,14 +10849,14 @@ define void @test_vsuxseg6_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -10869,14 +10869,14 @@ define void @test_vsuxseg6_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -10886,14 +10886,14 @@ define void @test_vsuxseg6_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -10906,14 +10906,14 @@ define void @test_vsuxseg6_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -10923,14 +10923,14 @@ define void @test_vsuxseg6_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -10943,14 +10943,14 @@ define void @test_vsuxseg6_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -10960,14 +10960,14 @@ define void @test_vsuxseg6_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -10980,15 +10980,15 @@ define void @test_vsuxseg7_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -10998,15 +10998,15 @@ define void @test_vsuxseg7_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -11019,15 +11019,15 @@ define void @test_vsuxseg7_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -11037,15 +11037,15 @@ define void @test_vsuxseg7_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -11058,15 +11058,15 @@ define void @test_vsuxseg7_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -11076,15 +11076,15 @@ define void @test_vsuxseg7_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -11097,15 +11097,15 @@ define void @test_vsuxseg7_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -11115,15 +11115,15 @@ define void @test_vsuxseg7_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -11136,16 +11136,16 @@ define void @test_vsuxseg8_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl)
@@ -11155,16 +11155,16 @@ define void @test_vsuxseg8_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
@@ -11177,16 +11177,16 @@ define void @test_vsuxseg8_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT:
vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11196,16 +11196,16 @@ define void @test_vsuxseg8_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11218,16 +11218,16 @@ define void @test_vsuxseg8_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11237,16 +11237,16 @@ define void @test_vsuxseg8_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, 
%val, i16* %base, %index, %mask, i64 %vl) @@ -11259,16 +11259,16 @@ define void @test_vsuxseg8_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11278,16 +11278,16 @@ define void @test_vsuxseg8_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11424,11 +11424,11 @@ define void @test_vsuxseg3_nxv2i64_nxv2i32( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i32( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11438,11 +11438,11 @@ define void @test_vsuxseg3_mask_nxv2i64_nxv2i32( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11455,11 +11455,11 @@ define void @test_vsuxseg3_nxv2i64_nxv2i8( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; 
CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i8( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11469,11 +11469,11 @@ define void @test_vsuxseg3_mask_nxv2i64_nxv2i8( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11486,11 +11486,11 @@ define void @test_vsuxseg3_nxv2i64_nxv2i16( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i16( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11500,11 +11500,11 @@ define void @test_vsuxseg3_mask_nxv2i64_nxv2i16( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11517,11 +11517,11 @@ define void @test_vsuxseg3_nxv2i64_nxv2i64( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i64( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11531,11 +11531,11 @@ define void @test_vsuxseg3_mask_nxv2i64_nxv2i64( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei64.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11548,12 +11548,12 @@ define void @test_vsuxseg4_nxv2i64_nxv2i32( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i32( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11563,12 +11563,12 @@ define void @test_vsuxseg4_mask_nxv2i64_nxv2i32( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11581,12 +11581,12 @@ define void @test_vsuxseg4_nxv2i64_nxv2i8( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11596,12 +11596,12 @@ define void @test_vsuxseg4_mask_nxv2i64_nxv2i8( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11614,12 +11614,12 @@ define void @test_vsuxseg4_nxv2i64_nxv2i16( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg4.nxv2i64.nxv2i16( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11629,12 +11629,12 @@ define void @test_vsuxseg4_mask_nxv2i64_nxv2i16( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11647,12 +11647,12 @@ define void @test_vsuxseg4_nxv2i64_nxv2i64( %val, i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i64( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11662,12 +11662,12 @@ define void @test_vsuxseg4_mask_nxv2i64_nxv2i64( %val, i64* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei64.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -12019,11 +12019,11 @@ define void @test_vsuxseg3_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i64( %val, %val, %val, double* %base, %index, i64 %vl) @@ -12033,11 +12033,11 @@ define void @test_vsuxseg3_mask_nxv1f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i64( %val, %val, %val, double* %base, %index, %mask, 
i64 %vl) @@ -12050,11 +12050,11 @@ define void @test_vsuxseg3_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, i64 %vl) @@ -12064,11 +12064,11 @@ define void @test_vsuxseg3_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12081,11 +12081,11 @@ define void @test_vsuxseg3_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, i64 %vl) @@ -12095,11 +12095,11 @@ define void @test_vsuxseg3_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12112,11 +12112,11 @@ define void @test_vsuxseg3_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, i64 %vl) @@ -12126,11 +12126,11 @@ define void @test_vsuxseg3_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v 
v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12143,12 +12143,12 @@ define void @test_vsuxseg4_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12158,12 +12158,12 @@ define void @test_vsuxseg4_mask_nxv1f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12176,12 +12176,12 @@ define void @test_vsuxseg4_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12191,12 +12191,12 @@ define void @test_vsuxseg4_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12209,12 +12209,12 @@ define void @test_vsuxseg4_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: 
vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12224,12 +12224,12 @@ define void @test_vsuxseg4_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12242,12 +12242,12 @@ define void @test_vsuxseg4_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12257,12 +12257,12 @@ define void @test_vsuxseg4_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12275,13 +12275,13 @@ define void @test_vsuxseg5_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12291,13 +12291,13 @@ define void @test_vsuxseg5_mask_nxv1f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; 
CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12310,13 +12310,13 @@ define void @test_vsuxseg5_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12326,13 +12326,13 @@ define void @test_vsuxseg5_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12345,13 +12345,13 @@ define void @test_vsuxseg5_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12361,13 +12361,13 @@ define void @test_vsuxseg5_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei16.v 
v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12380,13 +12380,13 @@ define void @test_vsuxseg5_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12396,13 +12396,13 @@ define void @test_vsuxseg5_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12415,14 +12415,14 @@ define void @test_vsuxseg6_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12432,14 +12432,14 @@ define void @test_vsuxseg6_mask_nxv1f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12452,14 +12452,14 @@ define void @test_vsuxseg6_nxv1f64_nxv1i32( %val, double* %base, %index, i64 
%vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12469,14 +12469,14 @@ define void @test_vsuxseg6_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12489,14 +12489,14 @@ define void @test_vsuxseg6_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12506,14 +12506,14 @@ define void @test_vsuxseg6_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12526,14 +12526,14 @@ define void @test_vsuxseg6_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v 
v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12543,14 +12543,14 @@ define void @test_vsuxseg6_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12563,15 +12563,15 @@ define void @test_vsuxseg7_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12581,15 +12581,15 @@ define void @test_vsuxseg7_mask_nxv1f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12602,15 +12602,15 @@ define void @test_vsuxseg7_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, 
v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12620,15 +12620,15 @@ define void @test_vsuxseg7_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12641,15 +12641,15 @@ define void @test_vsuxseg7_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12659,15 +12659,15 @@ define void @test_vsuxseg7_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12680,15 +12680,15 @@ define void @test_vsuxseg7_nxv1f64_nxv1i8( %val, double* %base, %index, 
i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12698,15 +12698,15 @@ define void @test_vsuxseg7_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12719,16 +12719,16 @@ define void @test_vsuxseg8_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12738,16 +12738,16 @@ define void @test_vsuxseg8_mask_nxv1f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t ; 
CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl)
@@ -12760,16 +12760,16 @@
 define void @test_vsuxseg8_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl)
@@ -12779,16 +12779,16 @@
 define void @test_vsuxseg8_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl)
@@ -12801,16 +12801,16 @@
 define void @test_vsuxseg8_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl)
@@ -12820,16 +12820,16 @@
 define void @test_vsuxseg8_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl)
@@ -12842,16 +12842,16 @@
 define void @test_vsuxseg8_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl)
@@ -12861,16 +12861,16 @@
 define void @test_vsuxseg8_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl)
@@ -13005,11 +13005,11 @@
 define void @test_vsuxseg3_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13019,11 +13019,11 @@
 define void @test_vsuxseg3_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13036,11 +13036,11 @@
 define void @test_vsuxseg3_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13050,11 +13050,11 @@
 define void @test_vsuxseg3_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13067,11 +13067,11 @@
 define void @test_vsuxseg3_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13081,11 +13081,11 @@
 define void @test_vsuxseg3_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -13098,11 +13098,11 @@
 define void @test_vsuxseg3_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v12
+; CHECK-NEXT:    vmv1r.v v14, v12
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
+; CHECK-NEXT:    vsuxseg3ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i64( %val, %val, %val, float* %base, %index, i64 %vl)
@@ -13112,11 +13112,11 @@
 define
void @test_vsuxseg3_mask_nxv2f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13129,12 +13129,12 @@ define void @test_vsuxseg4_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13144,12 +13144,12 @@ define void @test_vsuxseg4_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13162,12 +13162,12 @@ define void @test_vsuxseg4_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13177,12 +13177,12 @@ define void @test_vsuxseg4_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13195,12 +13195,12 @@ define void 
@test_vsuxseg4_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13210,12 +13210,12 @@ define void @test_vsuxseg4_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13228,12 +13228,12 @@ define void @test_vsuxseg4_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13243,12 +13243,12 @@ define void @test_vsuxseg4_mask_nxv2f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13261,13 +13261,13 @@ define void @test_vsuxseg5_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32( %val, %val, 
%val, %val, %val, float* %base, %index, i64 %vl) @@ -13277,13 +13277,13 @@ define void @test_vsuxseg5_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13296,13 +13296,13 @@ define void @test_vsuxseg5_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13312,13 +13312,13 @@ define void @test_vsuxseg5_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13331,13 +13331,13 @@ define void @test_vsuxseg5_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13347,13 +13347,13 @@ define void @test_vsuxseg5_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v 
v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13366,13 +13366,13 @@ define void @test_vsuxseg5_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13382,13 +13382,13 @@ define void @test_vsuxseg5_mask_nxv2f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13401,14 +13401,14 @@ define void @test_vsuxseg6_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13418,14 +13418,14 @@ define void @test_vsuxseg6_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, 
v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13438,14 +13438,14 @@ define void @test_vsuxseg6_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13455,14 +13455,14 @@ define void @test_vsuxseg6_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13475,14 +13475,14 @@ define void @test_vsuxseg6_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13492,14 +13492,14 @@ define void @test_vsuxseg6_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, 
%mask, i64 %vl) @@ -13512,14 +13512,14 @@ define void @test_vsuxseg6_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13529,14 +13529,14 @@ define void @test_vsuxseg6_mask_nxv2f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13549,15 +13549,15 @@ define void @test_vsuxseg7_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13567,15 +13567,15 @@ define void @test_vsuxseg7_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, 
%mask, i64 %vl) @@ -13588,15 +13588,15 @@ define void @test_vsuxseg7_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13606,15 +13606,15 @@ define void @test_vsuxseg7_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13627,15 +13627,15 @@ define void @test_vsuxseg7_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13645,15 +13645,15 @@ define void @test_vsuxseg7_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: 
tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13666,15 +13666,15 @@ define void @test_vsuxseg7_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13684,15 +13684,15 @@ define void @test_vsuxseg7_mask_nxv2f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13705,16 +13705,16 @@ define void @test_vsuxseg8_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13724,16 +13724,16 @@ define void @test_vsuxseg8_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; 
CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13746,16 +13746,16 @@ define void @test_vsuxseg8_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13765,16 +13765,16 @@ define void @test_vsuxseg8_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13787,16 +13787,16 @@ define void @test_vsuxseg8_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13806,16 +13806,16 @@ define void @test_vsuxseg8_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13828,16 +13828,16 @@ define void @test_vsuxseg8_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13847,16 +13847,16 @@ define void @test_vsuxseg8_mask_nxv2f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13993,11 +13993,11 @@ define void @test_vsuxseg3_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i64( %val, %val, %val, half* %base, %index, i64 %vl) @@ -14007,11 +14007,11 @@ define void @test_vsuxseg3_mask_nxv1f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg3_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14024,11 +14024,11 @@ define void @test_vsuxseg3_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, i64 %vl) @@ -14038,11 +14038,11 @@ define void @test_vsuxseg3_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14055,11 +14055,11 @@ define void @test_vsuxseg3_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, i64 %vl) @@ -14069,11 +14069,11 @@ define void @test_vsuxseg3_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14086,11 +14086,11 @@ define void @test_vsuxseg3_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; 
CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, i64 %vl) @@ -14100,11 +14100,11 @@ define void @test_vsuxseg3_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14117,12 +14117,12 @@ define void @test_vsuxseg4_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14132,12 +14132,12 @@ define void @test_vsuxseg4_mask_nxv1f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14150,12 +14150,12 @@ define void @test_vsuxseg4_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14165,12 +14165,12 @@ define void @test_vsuxseg4_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; 
CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14183,12 +14183,12 @@ define void @test_vsuxseg4_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14198,12 +14198,12 @@ define void @test_vsuxseg4_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14216,12 +14216,12 @@ define void @test_vsuxseg4_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14231,12 +14231,12 @@ define void @test_vsuxseg4_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14249,13 +14249,13 @@ define void @test_vsuxseg5_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: 
vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14265,13 +14265,13 @@
 define void @test_vsuxseg5_mask_nxv1f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14284,13 +14284,13 @@
 define void @test_vsuxseg5_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14300,13 +14300,13 @@
 define void @test_vsuxseg5_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14319,13 +14319,13 @@
 define void @test_vsuxseg5_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14335,13 +14335,13 @@
 define void @test_vsuxseg5_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14354,13 +14354,13 @@
 define void @test_vsuxseg5_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14370,13 +14370,13 @@
 define void @test_vsuxseg5_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14389,14 +14389,14 @@
 define void @test_vsuxseg6_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14406,14 +14406,14 @@
 define void @test_vsuxseg6_mask_nxv1f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14426,14 +14426,14 @@
 define void @test_vsuxseg6_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14443,14 +14443,14 @@
 define void @test_vsuxseg6_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14463,14 +14463,14 @@
 define void @test_vsuxseg6_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14480,14 +14480,14 @@
 define void @test_vsuxseg6_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14500,14 +14500,14 @@
 define void @test_vsuxseg6_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14517,14 +14517,14 @@
 define void @test_vsuxseg6_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14537,15 +14537,15 @@
 define void @test_vsuxseg7_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14555,15 +14555,15 @@
 define void @test_vsuxseg7_mask_nxv1f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14576,15 +14576,15 @@
 define void @test_vsuxseg7_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14594,15 +14594,15 @@
 define void @test_vsuxseg7_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14615,15 +14615,15 @@
 define void @test_vsuxseg7_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14633,15 +14633,15 @@
 define void @test_vsuxseg7_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14654,15 +14654,15 @@
 define void @test_vsuxseg7_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14672,15 +14672,15 @@
 define void @test_vsuxseg7_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14693,16 +14693,16 @@
 define void @test_vsuxseg8_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14712,16 +14712,16 @@
 define void @test_vsuxseg8_mask_nxv1f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14734,16 +14734,16 @@
 define void @test_vsuxseg8_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14753,16 +14753,16 @@
 define void @test_vsuxseg8_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14775,16 +14775,16 @@
 define void @test_vsuxseg8_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14794,16 +14794,16 @@
 define void @test_vsuxseg8_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14816,16 +14816,16 @@
 define void @test_vsuxseg8_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl)
@@ -14835,16 +14835,16 @@
 define void @test_vsuxseg8_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -14981,11 +14981,11 @@
 define void @test_vsuxseg3_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i64( %val, %val, %val, float* %base, %index, i64 %vl)
@@ -14995,11 +14995,11 @@
 define void @test_vsuxseg3_mask_nxv1f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15012,11 +15012,11 @@
 define void @test_vsuxseg3_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15026,11 +15026,11 @@
 define void @test_vsuxseg3_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15043,11 +15043,11 @@
 define void @test_vsuxseg3_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15057,11 +15057,11 @@
 define void @test_vsuxseg3_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15074,11 +15074,11 @@
 define void @test_vsuxseg3_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15088,11 +15088,11 @@
 define void @test_vsuxseg3_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15105,12 +15105,12 @@
 define void @test_vsuxseg4_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15120,12 +15120,12 @@
 define void @test_vsuxseg4_mask_nxv1f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15138,12 +15138,12 @@
 define void @test_vsuxseg4_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15153,12 +15153,12 @@
 define void @test_vsuxseg4_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15171,12 +15171,12 @@
 define void @test_vsuxseg4_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15186,12 +15186,12 @@
 define void @test_vsuxseg4_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15204,12 +15204,12 @@
 define void @test_vsuxseg4_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15219,12 +15219,12 @@
 define void @test_vsuxseg4_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15237,13 +15237,13 @@
 define void @test_vsuxseg5_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15253,13 +15253,13 @@
 define void @test_vsuxseg5_mask_nxv1f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15272,13 +15272,13 @@
 define void @test_vsuxseg5_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15288,13 +15288,13 @@
 define void @test_vsuxseg5_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15307,13 +15307,13 @@
 define void @test_vsuxseg5_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15323,13 +15323,13 @@
 define void @test_vsuxseg5_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15342,13 +15342,13 @@
 define void @test_vsuxseg5_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15358,13 +15358,13 @@
 define void @test_vsuxseg5_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15377,14 +15377,14 @@
 define void @test_vsuxseg6_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15394,14 +15394,14 @@
 define void @test_vsuxseg6_mask_nxv1f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15414,14 +15414,14 @@
 define void @test_vsuxseg6_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15431,14 +15431,14 @@
 define void @test_vsuxseg6_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15451,14 +15451,14 @@
 define void @test_vsuxseg6_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15468,14 +15468,14 @@
 define void @test_vsuxseg6_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15488,14 +15488,14 @@
 define void @test_vsuxseg6_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15505,14 +15505,14 @@
 define void @test_vsuxseg6_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15525,15 +15525,15 @@
 define void @test_vsuxseg7_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15543,15 +15543,15 @@
 define void @test_vsuxseg7_mask_nxv1f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15564,15 +15564,15 @@
 define void @test_vsuxseg7_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15582,15 +15582,15 @@
 define void @test_vsuxseg7_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15603,15 +15603,15 @@
 define void @test_vsuxseg7_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15621,15 +15621,15 @@
 define void @test_vsuxseg7_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15642,15 +15642,15 @@
 define void @test_vsuxseg7_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15660,15 +15660,15 @@
 define void @test_vsuxseg7_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15681,16 +15681,16 @@
 define void @test_vsuxseg8_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15700,16 +15700,16 @@
 define void @test_vsuxseg8_mask_nxv1f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15722,16 +15722,16 @@
 define void @test_vsuxseg8_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15741,16 +15741,16 @@
 define void @test_vsuxseg8_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15763,16 +15763,16 @@
 define void @test_vsuxseg8_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15782,16 +15782,16 @@
 define void @test_vsuxseg8_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15804,16 +15804,16 @@
 define void @test_vsuxseg8_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl)
@@ -15823,16 +15823,16 @@
 define void @test_vsuxseg8_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
@@ -15965,11 +15965,11 @@
 define void @test_vsuxseg3_nxv8f16_nxv8i16( %val, half* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, i64 %vl)
@@ -15979,11 +15979,11 @@
 define void @test_vsuxseg3_mask_nxv8f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -15996,11 +15996,11 @@
 define void @test_vsuxseg3_nxv8f16_nxv8i8( %val, half* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, i64 %vl)
@@ -16010,11 +16010,11 @@
 define void @test_vsuxseg3_mask_nxv8f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl)
@@ -16058,11 +16058,11 @@
 define void @test_vsuxseg3_nxv8f16_nxv8i32( %val, half* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, i64 %vl)
@@ -16072,11 +16072,11 @@
 define void @test_vsuxseg3_mask_nxv8f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: ret
 entry:
 tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half*
%base, %index, %mask, i64 %vl) @@ -16089,12 +16089,12 @@ define void @test_vsuxseg4_nxv8f16_nxv8i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -16104,12 +16104,12 @@ define void @test_vsuxseg4_mask_nxv8f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16122,12 +16122,12 @@ define void @test_vsuxseg4_nxv8f16_nxv8i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -16137,12 +16137,12 @@ define void @test_vsuxseg4_mask_nxv8f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16188,12 +16188,12 @@ define void @test_vsuxseg4_nxv8f16_nxv8i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v12 +; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32( %val, %val, %val, 
%val, half* %base, %index, i64 %vl) @@ -16203,12 +16203,12 @@ define void @test_vsuxseg4_mask_nxv8f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16467,11 +16467,11 @@ define void @test_vsuxseg3_nxv2f64_nxv2i32( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, i64 %vl) @@ -16481,11 +16481,11 @@ define void @test_vsuxseg3_mask_nxv2f64_nxv2i32( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16498,11 +16498,11 @@ define void @test_vsuxseg3_nxv2f64_nxv2i8( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, i64 %vl) @@ -16512,11 +16512,11 @@ define void @test_vsuxseg3_mask_nxv2f64_nxv2i8( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16529,11 +16529,11 @@ define void @test_vsuxseg3_nxv2f64_nxv2i16( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, i64 %vl) @@ -16543,11 +16543,11 @@ define void @test_vsuxseg3_mask_nxv2f64_nxv2i16( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16560,11 +16560,11 @@ define void @test_vsuxseg3_nxv2f64_nxv2i64( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i64( %val, %val, %val, double* %base, %index, i64 %vl) @@ -16574,11 +16574,11 @@ define void @test_vsuxseg3_mask_nxv2f64_nxv2i64( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg3ei64.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16591,12 +16591,12 @@ define void @test_vsuxseg4_nxv2f64_nxv2i32( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -16606,12 +16606,12 @@ define void @test_vsuxseg4_mask_nxv2f64_nxv2i32( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 
+; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16624,12 +16624,12 @@ define void @test_vsuxseg4_nxv2f64_nxv2i8( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -16639,12 +16639,12 @@ define void @test_vsuxseg4_mask_nxv2f64_nxv2i8( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16657,12 +16657,12 @@ define void @test_vsuxseg4_nxv2f64_nxv2i16( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -16672,12 +16672,12 @@ define void @test_vsuxseg4_mask_nxv2f64_nxv2i16( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16690,12 +16690,12 @@ define void @test_vsuxseg4_nxv2f64_nxv2i64( %val, double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v0, v8 -; CHECK-NEXT: vmv2r.v v2, v0 -; CHECK-NEXT: vmv2r.v v4, v0 -; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: 
vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i64( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -16705,12 +16705,12 @@ define void @test_vsuxseg4_mask_nxv2f64_nxv2i64( %val, double* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv2r.v v2, v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxseg4ei64.v v2, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16843,11 +16843,11 @@ define void @test_vsuxseg3_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, i64 %vl) @@ -16857,11 +16857,11 @@ define void @test_vsuxseg3_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16874,11 +16874,11 @@ define void @test_vsuxseg3_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, i64 %vl) @@ -16888,11 +16888,11 @@ define void @test_vsuxseg3_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: 
ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16936,11 +16936,11 @@ define void @test_vsuxseg3_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, i64 %vl) @@ -16950,11 +16950,11 @@ define void @test_vsuxseg3_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16967,12 +16967,12 @@ define void @test_vsuxseg4_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -16982,12 +16982,12 @@ define void @test_vsuxseg4_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17000,12 +17000,12 @@ define void @test_vsuxseg4_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ 
-17015,12 +17015,12 @@ define void @test_vsuxseg4_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17066,12 +17066,12 @@ define void @test_vsuxseg4_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17081,12 +17081,12 @@ define void @test_vsuxseg4_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17099,13 +17099,13 @@ define void @test_vsuxseg5_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17115,13 +17115,13 @@ define void @test_vsuxseg5_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: 
vsuxseg5ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17134,13 +17134,13 @@ define void @test_vsuxseg5_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17150,13 +17150,13 @@ define void @test_vsuxseg5_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17169,13 +17169,13 @@ define void @test_vsuxseg5_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v12 +; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17185,13 +17185,13 @@ define void @test_vsuxseg5_mask_nxv4f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17204,13 +17204,13 @@ define void @test_vsuxseg5_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 
-; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17220,13 +17220,13 @@ define void @test_vsuxseg5_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17239,14 +17239,14 @@ define void @test_vsuxseg6_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17256,14 +17256,14 @@ define void @test_vsuxseg6_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17276,14 +17276,14 @@ define void @test_vsuxseg6_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; 
CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17293,14 +17293,14 @@ define void @test_vsuxseg6_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17313,14 +17313,14 @@ define void @test_vsuxseg6_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v12 +; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17330,14 +17330,14 @@ define void @test_vsuxseg6_mask_nxv4f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17350,14 +17350,14 @@ define void @test_vsuxseg6_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg6ei16.v 
v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17367,14 +17367,14 @@ define void @test_vsuxseg6_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17387,15 +17387,15 @@ define void @test_vsuxseg7_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17405,15 +17405,15 @@ define void @test_vsuxseg7_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17426,15 +17426,15 @@ define void @test_vsuxseg7_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: 
vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17444,15 +17444,15 @@ define void @test_vsuxseg7_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17465,15 +17465,15 @@ define void @test_vsuxseg7_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v12 +; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17483,15 +17483,15 @@ define void @test_vsuxseg7_mask_nxv4f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17504,15 +17504,15 @@ define void @test_vsuxseg7_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; 
CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17522,15 +17522,15 @@ define void @test_vsuxseg7_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17543,16 +17543,16 @@ define void @test_vsuxseg8_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17562,16 +17562,16 @@ define void @test_vsuxseg8_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17584,16 +17584,16 @@ define void @test_vsuxseg8_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: 
vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17603,16 +17603,16 @@ define void @test_vsuxseg8_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17625,16 +17625,16 @@ define void @test_vsuxseg8_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vmv1r.v v3, v0 -; CHECK-NEXT: vmv1r.v v4, v0 -; CHECK-NEXT: vmv1r.v v5, v0 -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v12 +; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17644,16 +17644,16 @@ define void @test_vsuxseg8_mask_nxv4f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v1, v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -17666,16 +17666,16 @@
 define void @test_vsuxseg8_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl)
@@ -17685,16 +17685,16 @@
 define void @test_vsuxseg8_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -17829,11 +17829,11 @@
 define void @test_vsuxseg3_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
@@ -17843,11 +17843,11 @@
 define void @test_vsuxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -17860,11 +17860,11 @@
 define void @test_vsuxseg3_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
@@ -17874,11 +17874,11 @@
 define void @test_vsuxseg3_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -17891,11 +17891,11 @@
 define void @test_vsuxseg3_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
@@ -17905,11 +17905,11 @@
 define void @test_vsuxseg3_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -17922,11 +17922,11 @@
 define void @test_vsuxseg3_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i64(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
@@ -17936,11 +17936,11 @@
 define void @test_vsuxseg3_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -17953,12 +17953,12 @@
 define void @test_vsuxseg4_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
@@ -17968,12 +17968,12 @@
 define void @test_vsuxseg4_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -17986,12 +17986,12 @@
 define void @test_vsuxseg4_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
@@ -18001,12 +18001,12 @@
 define void @test_vsuxseg4_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -18019,12 +18019,12 @@
 define void @test_vsuxseg4_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
@@ -18034,12 +18034,12 @@
 define void @test_vsuxseg4_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -18052,12 +18052,12 @@
 define void @test_vsuxseg4_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i64(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
@@ -18067,12 +18067,12 @@
 define void @test_vsuxseg4_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -18085,13 +18085,13 @@
 define void @test_vsuxseg5_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
@@ -18101,13 +18101,13 @@
 define void @test_vsuxseg5_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -18120,13 +18120,13 @@
 define void @test_vsuxseg5_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
@@ -18136,13 +18136,13 @@
 define void @test_vsuxseg5_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -18155,13 +18155,13 @@
 define void @test_vsuxseg5_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
@@ -18171,13 +18171,13 @@
 define void @test_vsuxseg5_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -18190,13 +18190,13 @@
 define void @test_vsuxseg5_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i64(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
@@ -18206,13 +18206,13 @@
 define void @test_vsuxseg5_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -18225,14 +18225,14 @@
 define void @test_vsuxseg6_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
@@ -18242,14 +18242,14 @@
 define void @test_vsuxseg6_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -18262,14 +18262,14 @@
 define void @test_vsuxseg6_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
@@ -18279,14 +18279,14 @@
 define void @test_vsuxseg6_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -18299,14 +18299,14 @@
 define void @test_vsuxseg6_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
@@ -18316,14 +18316,14 @@
 define void @test_vsuxseg6_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -18336,14 +18336,14 @@
 define void @test_vsuxseg6_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i64(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
@@ -18353,14 +18353,14 @@
 define void @test_vsuxseg6_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -18373,15 +18373,15 @@
 define void @test_vsuxseg7_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
@@ -18391,15 +18391,15 @@
 define void @test_vsuxseg7_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -18412,15 +18412,15 @@
 define void @test_vsuxseg7_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
@@ -18430,15 +18430,15 @@
 define void @test_vsuxseg7_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -18451,15 +18451,15 @@
 define void @test_vsuxseg7_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
@@ -18469,15 +18469,15 @@
 define void @test_vsuxseg7_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -18490,15 +18490,15 @@
 define void @test_vsuxseg7_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i64(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
@@ -18508,15 +18508,15 @@
 define void @test_vsuxseg7_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -18529,16 +18529,16 @@
 define void @test_vsuxseg8_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
@@ -18548,16 +18548,16 @@
 define void @test_vsuxseg8_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -18570,16 +18570,16 @@
 define void @test_vsuxseg8_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
@@ -18589,16 +18589,16 @@
 define void @test_vsuxseg8_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -18611,16 +18611,16 @@
 define void @test_vsuxseg8_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
@@ -18630,16 +18630,16 @@
 define void @test_vsuxseg8_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t
+; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -18652,16 +18652,16 @@
 define void @test_vsuxseg8_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv1r.v v1, v0
-; CHECK-NEXT: vmv1r.v v2, v0
-; CHECK-NEXT: vmv1r.v v3, v0
-; CHECK-NEXT: vmv1r.v v4, v0
-; CHECK-NEXT: vmv1r.v v5, v0
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i64(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
@@ -18671,16 +18671,16 @@
 define void @test_vsuxseg8_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v1, v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -18815,11 +18815,11 @@
 define void @test_vsuxseg3_nxv4f32_nxv4i32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl)
@@ -18829,11 +18829,11 @@
 define void @test_vsuxseg3_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -18846,11 +18846,11 @@
 define void @test_vsuxseg3_nxv4f32_nxv4i8(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl)
@@ -18860,11 +18860,11 @@
 define void @test_vsuxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -18877,11 +18877,11 @@
 define void @test_vsuxseg3_nxv4f32_nxv4i64(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg3ei64.v v16, (a0), v12
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i64(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl)
@@ -18891,11 +18891,11 @@
 define void @test_vsuxseg3_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -18908,11 +18908,11 @@
 define void @test_vsuxseg3_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl)
@@ -18922,11 +18922,11 @@
 define void @test_vsuxseg3_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -18939,12 +18939,12 @@
 define void @test_vsuxseg4_nxv4f32_nxv4i32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl)
@@ -18954,12 +18954,12 @@
 define void @test_vsuxseg4_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -18972,12 +18972,12 @@
 define void @test_vsuxseg4_nxv4f32_nxv4i8(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl)
@@ -18987,12 +18987,12 @@
 define void @test_vsuxseg4_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -19005,12 +19005,12 @@
 define void @test_vsuxseg4_nxv4f32_nxv4i64(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v12
+; CHECK-NEXT: vsuxseg4ei64.v v16, (a0), v12
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i64(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl)
@@ -19020,12 +19020,12 @@
 define void @test_vsuxseg4_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei64.v v2, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg4ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -19038,12 +19038,12 @@
 define void @test_vsuxseg4_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v0, v8
-; CHECK-NEXT: vmv2r.v v2, v0
-; CHECK-NEXT: vmv2r.v v4, v0
-; CHECK-NEXT: vmv2r.v v6, v0
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10
+; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i16(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl)
@@ -19053,12 +19053,12 @@
 define void @test_vsuxseg4_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v2, v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
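Every hunk above follows the same mechanical pattern, so a single reduced case is enough to reproduce the new allocation locally. The sketch below is not part of the patch: the function name @reduced is hypothetical, and the llc flags in the comment are an assumption modeled on the rvv tests of this era (the authoritative RUN line sits at the top of the actual test file and is unchanged by this diff). The point it illustrates is the one the CHECK churn encodes: with the reordered register tuples, the NF=3 value tuple is expected to be allocated upward from v10 (v10/v11/v12) rather than from v0, leaving v0 free to carry the mask.

; Hypothetical reduced reproducer (not in the patch). Assumed invocation:
;   llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zfh -verify-machineinstrs < reduced.ll
declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, half*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)

define void @reduced(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
entry:
  ; The three segment values form a vmv1r-copied tuple; after this patch the
  ; allocator should pick v10/v11/v12 for it, keeping v0 reserved for %mask.
  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}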