diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -39,7 +39,7 @@ N->getValueType(0)); }]>; -defvar TAIL_UNDISTURBED = 0; +defvar TAIL_UNDISTURBED_MASK_UNDISTURBED = 0; defvar TAIL_AGNOSTIC = 1; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -1213,7 +1213,7 @@ VLOpFrag)), (!cast("PseudoVFMADD_VV_"# suffix #"_MASK") vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, - (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; + (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>; def : Pat<(vti.Vector (riscv_fma_vl vti.RegClass:$rs1, vti.RegClass:$rd, (riscv_fneg_vl vti.RegClass:$rs2, @@ -1293,7 +1293,7 @@ VLOpFrag)), (!cast("PseudoVFMADD_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, - (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; + (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>; def : Pat<(vti.Vector (riscv_fma_vl (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rd, @@ -1895,7 +1895,7 @@ VLOpFrag)), (!cast("PseudoVSLIDEUP_VI_"#vti.LMul.MX) vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2, - GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; + GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>; def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3), (vti.Vector vti.RegClass:$rs1), @@ -1903,7 +1903,7 @@ VLOpFrag)), (!cast("PseudoVSLIDEUP_VX_"#vti.LMul.MX) vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2, - GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; + GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>; def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3), (vti.Vector vti.RegClass:$rs1), diff --git a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll @@ -482,10 +482,10 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v14, v10, a0 ; CHECK-NEXT: vslidedown.vx v12, v9, a0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v13, v14, 0 ; CHECK-NEXT: add a1, a0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vx v12, v10, a0 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll @@ -515,7 +515,7 @@ ; RV32-NEXT: vmv.v.i v8, 0 ; RV32-NEXT: vslide1up.vx v9, v8, a1 ; RV32-NEXT: vslide1up.vx v10, v9, a0 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, mu ; RV32-NEXT: vslideup.vi v8, v10, 0 ; RV32-NEXT: ret ; @@ -554,7 +554,7 @@ ; RV32-NEXT: vmv.v.i v8, 0 ; RV32-NEXT: vslide1up.vx v9, v8, a1 ; RV32-NEXT: vslide1up.vx v10, v9, a0 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, mu 
; RV32-NEXT: vslideup.vi v8, v10, 0 ; RV32-NEXT: ret ; @@ -593,7 +593,7 @@ ; RV32-NEXT: vmv.v.i v8, 0 ; RV32-NEXT: vslide1up.vx v9, v8, a1 ; RV32-NEXT: vslide1up.vx v10, v9, a0 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, mu ; RV32-NEXT: vslideup.vi v8, v10, 0 ; RV32-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll @@ -1451,7 +1451,7 @@ ; LMULMAX8-NEXT: vmerge.vim v16, v16, 1, v0 ; LMULMAX8-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX8-NEXT: vmv.v.i v17, 0 -; LMULMAX8-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; LMULMAX8-NEXT: vslideup.vi v17, v16, 0 ; LMULMAX8-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX8-NEXT: vmsne.vi v16, v17, 0 @@ -1489,7 +1489,7 @@ ; LMULMAX4-NEXT: vmerge.vim v12, v12, 1, v0 ; LMULMAX4-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX4-NEXT: vmv.v.i v13, 0 -; LMULMAX4-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; LMULMAX4-NEXT: vslideup.vi v13, v12, 0 ; LMULMAX4-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX4-NEXT: vmsne.vi v12, v13, 0 @@ -1533,7 +1533,7 @@ ; LMULMAX2-NEXT: vmerge.vim v10, v10, 1, v0 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v11, 0 -; LMULMAX2-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; LMULMAX2-NEXT: vslideup.vi v11, v10, 0 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmsne.vi v10, v11, 0 @@ -1589,7 +1589,7 @@ ; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmv.v.i v10, 0 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v9, v10, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll @@ -558,7 +558,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -849,7 +849,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 8 ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: ret @@ -1164,7 +1164,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: ret @@ -1188,7 +1188,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; 
LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vse16.v v8, (a0) ; LMULMAX1-NEXT: ret @@ -1277,19 +1277,19 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 8 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 12 ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: ret @@ -1313,12 +1313,12 @@ ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vncvt.x.x.w v10, v10 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse16.v v10, (a1) @@ -1489,7 +1489,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: ret @@ -1520,7 +1520,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vse16.v v8, (a0) ; LMULMAX1-NEXT: ret @@ -1544,7 +1544,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vse32.v v8, (a0) ; LMULMAX1-NEXT: ret @@ -1575,7 +1575,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 @@ -1583,7 +1583,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 @@ -1591,7 +1591,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: ret @@ -1622,19 +1622,19 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 ; LMULMAX1-NEXT: vse16.v v8, (a0) ; LMULMAX1-NEXT: ret @@ -1658,12 +1658,12 @@ ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vncvt.x.x.w v10, v10 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 2 ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse32.v v10, (a1) @@ -1696,7 +1696,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 @@ -1704,7 +1704,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 6, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 @@ -1712,7 +1712,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v12 @@ -1720,7 +1720,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 10, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 10, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 8 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: 
vncvt.x.x.w v9, v13 @@ -1728,7 +1728,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 10 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v14 @@ -1736,7 +1736,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 14, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 14, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 12 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v15 @@ -1744,7 +1744,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 14 ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: ret @@ -1763,7 +1763,7 @@ ; LMULMAX4-NEXT: vncvt.x.x.w v8, v14 ; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; LMULMAX4-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, tu, mu ; LMULMAX4-NEXT: vslideup.vi v8, v12, 8 ; LMULMAX4-NEXT: vse8.v v8, (a0) ; LMULMAX4-NEXT: ret @@ -1783,19 +1783,19 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v13 @@ -1805,19 +1805,19 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v10, v12 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v10, v10 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v14 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v15 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 6 ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse16.v v10, (a1) @@ -1834,7 +1834,7 @@ ; LMULMAX4-NEXT: 
vncvt.x.x.w v14, v8 ; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; LMULMAX4-NEXT: vncvt.x.x.w v8, v14 -; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, tu, mu ; LMULMAX4-NEXT: vslideup.vi v8, v12, 8 ; LMULMAX4-NEXT: vse16.v v8, (a0) ; LMULMAX4-NEXT: ret @@ -1849,22 +1849,22 @@ ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vncvt.x.x.w v10, v10 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v13 ; LMULMAX1-NEXT: vncvt.x.x.w v11, v12 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v11, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v15 ; LMULMAX1-NEXT: vncvt.x.x.w v12, v14 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v12, v9, 2 ; LMULMAX1-NEXT: addi a1, a0, 48 ; LMULMAX1-NEXT: vse32.v v12, (a1) @@ -1880,7 +1880,7 @@ ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX4-NEXT: vncvt.x.x.w v16, v12 ; LMULMAX4-NEXT: vncvt.x.x.w v12, v8 -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, tu, mu ; LMULMAX4-NEXT: vslideup.vi v12, v16, 8 ; LMULMAX4-NEXT: vse32.v v12, (a0) ; LMULMAX4-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll @@ -346,7 +346,7 @@ ; LMULMAX2-NEXT: vmerge.vim v8, v8, 1, v0 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v9, 0 -; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; LMULMAX2-NEXT: vslideup.vi v9, v8, 0 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmsne.vi v8, v9, 0 @@ -362,7 +362,7 @@ ; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmv.v.i v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v9, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v8, v9, 0 @@ -390,7 +390,7 @@ ; LMULMAX2-NEXT: vmerge.vim v8, v8, 1, v0 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v9, 0 -; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; LMULMAX2-NEXT: vslideup.vi v9, v8, 0 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmsne.vi v8, v9, 0 @@ -411,7 +411,7 @@ ; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmv.v.i v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v9, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 
8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v8, v9, 0 @@ -440,7 +440,7 @@ ; LMULMAX2-NEXT: vmerge.vim v8, v8, 1, v0 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v9, 0 -; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; LMULMAX2-NEXT: vslideup.vi v9, v8, 0 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmsne.vi v8, v9, 0 @@ -462,7 +462,7 @@ ; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmv.v.i v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v9, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v8, v9, 0 @@ -482,7 +482,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -507,7 +507,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -526,7 +526,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -551,7 +551,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -577,7 +577,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -602,7 +602,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll @@ -202,7 +202,7 @@ ; RV32-FP-NEXT: vmv.v.i v8, 0 ; RV32-FP-NEXT: vslide1up.vx v9, v8, a1 ; RV32-FP-NEXT: vslide1up.vx v10, v9, a0 -; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, tu, mu ; RV32-FP-NEXT: vslideup.vi v8, v10, 0 ; RV32-FP-NEXT: ret ; @@ -222,7 +222,7 @@ ; RV32-FP-NEXT: vmv.v.i v8, 0 ; RV32-FP-NEXT: vslide1up.vx v9, v8, a1 ; RV32-FP-NEXT: 
vslide1up.vx v10, v9, a0 -; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, tu, mu ; RV32-FP-NEXT: vslideup.vi v8, v10, 0 ; RV32-FP-NEXT: ret ; @@ -242,7 +242,7 @@ ; RV32-FP-NEXT: vmv.v.i v8, 0 ; RV32-FP-NEXT: vslide1up.vx v9, v8, a1 ; RV32-FP-NEXT: vslide1up.vx v10, v9, a0 -; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, tu, mu ; RV32-FP-NEXT: vslideup.vi v8, v10, 0 ; RV32-FP-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll @@ -117,7 +117,7 @@ ; CHECK-NEXT: addi a1, a1, %lo(.LCPI4_0) ; CHECK-NEXT: vlse32.v v8, (a1), zero ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vse32.v v8, (a0) @@ -132,7 +132,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: vfmv.v.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vse32.v v9, (a0) @@ -153,7 +153,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v8, ft0 ; CHECK-NEXT: vfmv.v.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vse32.v v9, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll @@ -164,7 +164,7 @@ ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.f.f.w v10, v8 ; LMULMAX1-NEXT: vfncvt.f.f.w v8, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v8, 4 ; LMULMAX1-NEXT: vse16.v v10, (a1) ; LMULMAX1-NEXT: ret @@ -204,19 +204,19 @@ ; LMULMAX1-NEXT: vfncvt.rod.f.f.w v12, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vfncvt.f.f.w v11, v12 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v11, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rod.f.f.w v11, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vfncvt.f.f.w v9, v11 -; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rod.f.f.w v9, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vfncvt.f.f.w v8, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v8, 6 ; LMULMAX1-NEXT: vse16.v v10, (a1) ; LMULMAX1-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll 
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll @@ -46,7 +46,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -71,7 +71,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -96,7 +96,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -121,7 +121,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -210,7 +210,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -235,7 +235,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -468,7 +468,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -495,7 +495,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -551,7 +551,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -576,7 +576,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 
@@ -601,7 +601,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -626,7 +626,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -715,7 +715,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -740,7 +740,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -974,7 +974,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -1002,7 +1002,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -1059,7 +1059,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -1084,7 +1084,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -1109,7 +1109,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -1134,7 +1134,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -1223,7 +1223,7 @@ ; CHECK-NEXT: 
vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -1248,7 +1248,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -1482,7 +1482,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -1510,7 +1510,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll @@ -259,7 +259,7 @@ define <4 x half> @slideup_v4f16(<4 x half> %x) { ; CHECK-LABEL: slideup_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -270,9 +270,9 @@ define <8 x float> @slideup_v8f32(<8 x float> %x) { ; CHECK-LABEL: slideup_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vi v10, v8, 3 -; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret %s = shufflevector <8 x float> %x, <8 x float> poison, <8 x i32> ret <8 x float> %s @@ -283,9 +283,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 7, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v10, v8, 1 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vi v10, v8, 7 -; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret %s = shufflevector <8 x float> %x, <8 x float> poison, <8 x i32> ret <8 x float> %s @@ -296,9 +296,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v12, v8, 6 -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vi v12, v8, 2 -; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret %s = shufflevector <8 x double> %x, <8 x double> poison, <8 x i32> ret <8 x double> %s @@ -309,7 +309,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 6, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 6 ; CHECK-NEXT: ret %s = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> @@ -321,9 +321,9 
@@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 3, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v12, v12, 5 -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vi v12, v8, 3 -; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret %s = shufflevector <8 x double> %x, <8 x double> %y, <8 x i32> ret <8 x double> %s diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll @@ -135,8 +135,9 @@ ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; LMULMAX1-NEXT: vmv.v.i v9, 0 ; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v0, v8, 0 ; LMULMAX1-NEXT: ret %z = fptosi <8 x float> %x to <8 x i1> @@ -168,8 +169,9 @@ ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; LMULMAX1-NEXT: vmv.v.i v9, 0 ; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v0, v8, 0 ; LMULMAX1-NEXT: ret %z = fptoui <8 x float> %x to <8 x i1> @@ -436,7 +438,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v11, v12 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v11, v11 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v11, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v11, v9 @@ -444,7 +446,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v9, v8 @@ -452,7 +454,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v8, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v8, 6 ; LMULMAX1-NEXT: vse8.v v10, (a1) ; LMULMAX1-NEXT: ret @@ -498,7 +500,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v11, v12 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v11, v11 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v11, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v11, v9 @@ -506,7 +508,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v9, v8 @@ -514,7 +516,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v8, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 -; 
LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v8, 6 ; LMULMAX1-NEXT: vse8.v v10, (a1) ; LMULMAX1-NEXT: ret @@ -549,7 +551,7 @@ ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vmv.v.i v9, 0 ; LMULMAX1-NEXT: vmerge.vim v13, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v12, v13, 2 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v0, v12, 0 @@ -560,7 +562,7 @@ ; LMULMAX1-NEXT: vmsne.vi v0, v10, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vmerge.vim v10, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v12, v10, 4 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v0, v12, 0 @@ -571,8 +573,9 @@ ; LMULMAX1-NEXT: vmsne.vi v0, v10, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v0, v8, 0 ; LMULMAX1-NEXT: ret %z = fptosi <8 x double> %x to <8 x i1> @@ -604,7 +607,7 @@ ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vmv.v.i v9, 0 ; LMULMAX1-NEXT: vmerge.vim v13, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v12, v13, 2 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v0, v12, 0 @@ -615,7 +618,7 @@ ; LMULMAX1-NEXT: vmsne.vi v0, v10, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vmerge.vim v10, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v12, v10, 4 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v0, v12, 0 @@ -626,8 +629,9 @@ ; LMULMAX1-NEXT: vmsne.vi v0, v10, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v0, v8, 0 ; LMULMAX1-NEXT: ret %z = fptoui <8 x double> %x to <8 x i1> diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll @@ -482,19 +482,19 @@ ; LMULMAX1-NEXT: vfncvt.f.x.w v12, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vfncvt.f.f.w v11, v12 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v11, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.f.x.w v11, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vfncvt.f.f.w v9, v11 -; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.f.x.w 
v9, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vfncvt.f.f.w v8, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v8, 6 ; LMULMAX1-NEXT: vse16.v v10, (a1) ; LMULMAX1-NEXT: ret @@ -534,19 +534,19 @@ ; LMULMAX1-NEXT: vfncvt.f.xu.w v12, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vfncvt.f.f.w v11, v12 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v11, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.f.xu.w v11, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vfncvt.f.f.w v9, v11 -; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.f.xu.w v9, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; LMULMAX1-NEXT: vfncvt.f.f.w v8, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v8, 6 ; LMULMAX1-NEXT: vse16.v v10, (a1) ; LMULMAX1-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll @@ -28,7 +28,7 @@ ; RV32-NEXT: vmv.v.i v9, 0 ; RV32-NEXT: vmerge.vim v9, v9, 1, v0 ; RV32-NEXT: addi a0, a1, 1 -; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu ; RV32-NEXT: vslideup.vx v9, v8, a1 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vand.vi v8, v9, 1 @@ -43,7 +43,7 @@ ; RV64-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64-NEXT: sext.w a0, a1 ; RV64-NEXT: addi a1, a0, 1 -; RV64-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; RV64-NEXT: vslideup.vx v9, v8, a0 ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vand.vi v8, v9, 1 @@ -60,7 +60,9 @@ ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 1 +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -76,7 +78,7 @@ ; RV32-NEXT: vmv.v.i v9, 0 ; RV32-NEXT: vmerge.vim v9, v9, 1, v0 ; RV32-NEXT: addi a0, a1, 1 -; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu ; RV32-NEXT: vslideup.vx v9, v8, a1 ; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV32-NEXT: vand.vi v8, v9, 1 @@ -91,7 +93,7 @@ ; RV64-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64-NEXT: sext.w a0, a1 ; RV64-NEXT: addi a1, a0, 1 -; RV64-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; RV64-NEXT: vslideup.vx v9, v8, a0 ; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV64-NEXT: vand.vi v8, v9, 1 @@ -108,7 +110,7 @@ ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 @@ -126,7 +128,7 @@ ; RV32-NEXT: vmv.v.i v9, 0 ; 
RV32-NEXT: vmerge.vim v9, v9, 1, v0 ; RV32-NEXT: addi a0, a1, 1 -; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf2, tu, mu ; RV32-NEXT: vslideup.vx v9, v8, a1 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vand.vi v8, v9, 1 @@ -141,7 +143,7 @@ ; RV64-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64-NEXT: sext.w a0, a1 ; RV64-NEXT: addi a1, a0, 1 -; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e8, mf2, tu, mu ; RV64-NEXT: vslideup.vx v9, v8, a0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vand.vi v8, v9, 1 @@ -159,7 +161,7 @@ ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vmerge.vim v12, v12, 1, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, m4, tu, mu ; CHECK-NEXT: vslideup.vi v12, v8, 1 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v12, 1 @@ -178,7 +180,7 @@ ; RV32-NEXT: vmv.v.i v12, 0 ; RV32-NEXT: vmerge.vim v12, v12, 1, v0 ; RV32-NEXT: addi a0, a1, 1 -; RV32-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, m4, tu, mu ; RV32-NEXT: vslideup.vx v12, v8, a1 ; RV32-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; RV32-NEXT: vand.vi v8, v12, 1 @@ -194,7 +196,7 @@ ; RV64-NEXT: vmerge.vim v12, v12, 1, v0 ; RV64-NEXT: sext.w a0, a1 ; RV64-NEXT: addi a1, a0, 1 -; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e8, m4, tu, mu ; RV64-NEXT: vslideup.vx v12, v8, a0 ; RV64-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; RV64-NEXT: vand.vi v8, v12, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll @@ -14,7 +14,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v12, (a0) -; CHECK-NEXT: vsetivli zero, 2, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 0 ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp @@ -27,7 +27,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v12, (a0) -; CHECK-NEXT: vsetivli zero, 4, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 2 ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp @@ -40,7 +40,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v12, (a0) -; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 6 ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp @@ -53,7 +53,7 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vle32.v v12, (a0) -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m4, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m4, tu, mu ; LMULMAX2-NEXT: vslideup.vi v8, v12, 0 ; LMULMAX2-NEXT: ret ; @@ -63,9 +63,9 @@ ; LMULMAX1-NEXT: vle32.v v12, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle32.v v16, (a0) -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m4, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v12, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e32, m4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e32, m4, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v16, 4 ; LMULMAX1-NEXT: ret %sv = load <8 x i32>, <8 x i32>* %svp @@ -78,7 +78,7 @@ 
; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vle32.v v12, (a0) -; LMULMAX2-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e32, m4, tu, mu ; LMULMAX2-NEXT: vslideup.vi v8, v12, 8 ; LMULMAX2-NEXT: ret ; @@ -88,9 +88,9 @@ ; LMULMAX1-NEXT: vle32.v v12, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle32.v v16, (a0) -; LMULMAX1-NEXT: vsetivli zero, 12, e32, m4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 12, e32, m4, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v12, 8 -; LMULMAX1-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e32, m4, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v16, 12 ; LMULMAX1-NEXT: ret %sv = load <8 x i32>, <8 x i32>* %svp @@ -116,7 +116,7 @@ ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v9, (a0) -; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vse32.v v9, (a0) @@ -135,6 +135,7 @@ ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v9, (a0) +; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 2 ; CHECK-NEXT: vse32.v v9, (a0) ; CHECK-NEXT: ret @@ -166,7 +167,7 @@ ; LMULMAX2-NEXT: vle32.v v8, (a1) ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vle32.v v10, (a0) -; LMULMAX2-NEXT: vsetivli zero, 2, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e32, m2, tu, mu ; LMULMAX2-NEXT: vslideup.vi v10, v8, 0 ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vse32.v v10, (a0) @@ -178,7 +179,7 @@ ; LMULMAX1-NEXT: vle32.v v8, (a1) ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vle32.v v9, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v9, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vse32.v v9, (a0) @@ -197,7 +198,7 @@ ; LMULMAX2-NEXT: vle32.v v8, (a1) ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vle32.v v10, (a0) -; LMULMAX2-NEXT: vsetivli zero, 4, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e32, m2, tu, mu ; LMULMAX2-NEXT: vslideup.vi v10, v8, 2 ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vse32.v v10, (a0) @@ -209,6 +210,7 @@ ; LMULMAX1-NEXT: vle32.v v8, (a1) ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vle32.v v9, (a0) +; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v9, v8, 2 ; LMULMAX1-NEXT: vse32.v v9, (a0) ; LMULMAX1-NEXT: ret @@ -226,6 +228,7 @@ ; LMULMAX2-NEXT: vle32.v v8, (a1) ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vle32.v v10, (a0) +; LMULMAX2-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; LMULMAX2-NEXT: vslideup.vi v10, v8, 6 ; LMULMAX2-NEXT: vse32.v v10, (a0) ; LMULMAX2-NEXT: ret @@ -237,6 +240,7 @@ ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vle32.v v9, (a0) +; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v9, v8, 2 ; LMULMAX1-NEXT: vse32.v v9, (a0) ; LMULMAX1-NEXT: ret @@ -252,7 +256,7 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX2-NEXT: vle32.v v8, (a1) -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, 
tu, mu
; LMULMAX2-NEXT: vslideup.vi v10, v8, 6
; LMULMAX2-NEXT: vse32.v v10, (a0)
; LMULMAX2-NEXT: ret
@@ -261,7 +265,7 @@
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vle32.v v8, (a1)
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v9, v8, 2
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vse32.v v9, (a0)
@@ -279,7 +283,7 @@
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vle16.v v9, (a1)
-; CHECK-NEXT: vsetivli zero, 2, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v8, v9, 0
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vse16.v v8, (a0)
@@ -298,7 +302,7 @@
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vle16.v v9, (a1)
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v8, v9, 2
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
@@ -317,7 +321,7 @@
; LMULMAX2-NEXT: vlm.v v8, (a0)
; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX2-NEXT: vlm.v v9, (a1)
-; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf4, ta, mu
+; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf4, tu, mu
; LMULMAX2-NEXT: vslideup.vi v8, v9, 0
; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; LMULMAX2-NEXT: vsm.v v8, (a0)
@@ -329,7 +333,7 @@
; LMULMAX1-NEXT: vlm.v v8, (a0)
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vlm.v v9, (a1)
-; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
+; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-NEXT: vsm.v v8, (a0)
@@ -349,7 +353,7 @@
; LMULMAX2-NEXT: vlm.v v8, (a0)
; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX2-NEXT: vlm.v v9, (a1)
-; LMULMAX2-NEXT: vsetivli zero, 3, e8, mf4, ta, mu
+; LMULMAX2-NEXT: vsetivli zero, 3, e8, mf4, tu, mu
; LMULMAX2-NEXT: vslideup.vi v8, v9, 2
; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; LMULMAX2-NEXT: vsm.v v8, (a0)
@@ -362,7 +366,7 @@
; LMULMAX1-NEXT: vlm.v v8, (a0)
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vlm.v v9, (a1)
-; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
+; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-NEXT: vsm.v v8, (a0)
@@ -388,7 +392,7 @@
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
@@ -415,8 +419,9 @@
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v9, v8, 4
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
@@ -432,7 +437,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vle16.v v9, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v8, v9, 0
; CHECK-NEXT: ret
%sv = load <2 x i16>, <2 x i16>* %svp
@@ -445,7 +450,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vle16.v v9, (a0)
-; CHECK-NEXT: vsetivli zero, 6, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 6, e16, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v8, v9, 4
; CHECK-NEXT: ret
%sv = load <2 x i16>, <2 x i16>* %svp
@@ -465,6 +470,7 @@
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmsne.vi v0, v9, 0
@@ -479,7 +485,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vlm.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, tu, mu
; CHECK-NEXT: vslideup.vi v0, v8, 0
; CHECK-NEXT: ret
%sv = load <8 x i1>, <8 x i1>* %svp
@@ -492,7 +498,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vlm.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 3, e8, mf8, ta, mu
+; CHECK-NEXT: vsetivli zero, 3, e8, mf8, tu, mu
; CHECK-NEXT: vslideup.vi v0, v8, 2
; CHECK-NEXT: ret
%sv = load <8 x i1>, <8 x i1>* %svp
@@ -508,7 +514,7 @@
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vle64.v v16, (a1)
-; CHECK-NEXT: vsetivli zero, 6, e64, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 6, e64, m8, tu, mu
; CHECK-NEXT: vslideup.vi v8, v16, 4
; CHECK-NEXT: vs8r.v v8, (a2)
; CHECK-NEXT: ret
@@ -538,7 +544,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e64, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu
; CHECK-NEXT: vslideup.vi v16, v8, 2
; CHECK-NEXT: vs8r.v v16, (a1)
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
@@ -14,7 +14,7 @@
; RV32-NEXT: vmv.v.i v10, 0
; RV32-NEXT: vslide1up.vx v12, v10, a2
; RV32-NEXT: vslide1up.vx v10, v12, a1
-; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV32-NEXT: vsetivli zero, 4, e64, m2, tu, mu
; RV32-NEXT: vslideup.vi v8, v10, 3
; RV32-NEXT: vse64.v v8, (a0)
; RV32-NEXT: ret
@@ -24,6 +24,7 @@
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vmv.s.x v10, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m2, tu, mu
; RV64-NEXT: vslideup.vi v8, v10, 3
; RV64-NEXT: vse64.v v8, (a0)
; RV64-NEXT: ret
@@ -48,13 +49,13 @@
; RV32-NEXT: vlse32.v v10, (a4), zero
; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv.s.x v10, a3
-; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, mu
; RV32-NEXT: vslideup.vi v8, v10, 2
; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, mu
; RV32-NEXT: vmv.v.i v10, 0
; RV32-NEXT: vslide1up.vx v12, v10, a2
; RV32-NEXT: vslide1up.vx v10, v12, a1
-; RV32-NEXT: vsetivli zero, 3, e64, m2, ta, mu
+; RV32-NEXT: vsetivli zero, 3, e64, m2, tu, mu
; RV32-NEXT: vslideup.vi v8, v10, 2
; RV32-NEXT: sw a1, 16(a0)
; RV32-NEXT: sw a2, 20(a0)
@@ -78,7 +79,7 @@
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmv.s.x v9, a1
-; CHECK-NEXT: vsetivli zero, 15, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 15, e8, m1, tu, mu
; CHECK-NEXT: vslideup.vi v8, v9, 14
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vse8.v v8, (a0)
@@ -97,7 +98,7 @@
; RV32-NEXT: vle16.v v8, (a0)
; RV32-NEXT: vmv.s.x v12, a1
; RV32-NEXT: addi a1, a2, 1
-; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m4, tu, mu
; RV32-NEXT: vslideup.vx v8, v12, a2
; RV32-NEXT: vsetvli zero, a3, e16, m4, ta, mu
; RV32-NEXT: vse16.v v8, (a0)
@@ -111,7 +112,7 @@
; RV64-NEXT: vmv.s.x v12, a1
; RV64-NEXT: sext.w a1, a2
; RV64-NEXT: addi a2, a1, 1
-; RV64-NEXT: vsetvli zero, a2, e16, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e16, m4, tu, mu
; RV64-NEXT: vslideup.vx v8, v12, a1
; RV64-NEXT: vsetvli zero, a3, e16, m4, ta, mu
; RV64-NEXT: vse16.v v8, (a0)
@@ -129,7 +130,7 @@
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: vfmv.s.f v10, fa0
; RV32-NEXT: addi a2, a1, 1
-; RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e32, m2, tu, mu
; RV32-NEXT: vslideup.vx v8, v10, a1
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; RV32-NEXT: vse32.v v8, (a0)
@@ -142,7 +143,7 @@
; RV64-NEXT: vfmv.s.f v10, fa0
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: addi a2, a1, 1
-; RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e32, m2, tu, mu
; RV64-NEXT: vslideup.vx v8, v10, a1
; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; RV64-NEXT: vse32.v v8, (a0)
@@ -177,7 +178,7 @@
; RV32-NEXT: li a2, -1
; RV32-NEXT: vmv.s.x v12, a2
; RV32-NEXT: addi a2, a1, 1
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu
; RV32-NEXT: vslideup.vx v8, v12, a1
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vse64.v v8, (a0)
@@ -191,7 +192,7 @@
; RV64-NEXT: vmv.s.x v12, a2
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: addi a2, a1, 1
-; RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m4, tu, mu
; RV64-NEXT: vslideup.vx v8, v12, a1
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vse64.v v8, (a0)
@@ -226,7 +227,7 @@
; RV32-NEXT: li a2, 6
; RV32-NEXT: vmv.s.x v12, a2
; RV32-NEXT: addi a2, a1, 1
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu
; RV32-NEXT: vslideup.vx v8, v12, a1
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vse64.v v8, (a0)
@@ -240,7 +241,7 @@
; RV64-NEXT: vmv.s.x v12, a2
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: addi a2, a1, 1
-; RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m4, tu, mu
; RV64-NEXT: vslideup.vx v8, v12, a1
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vse64.v v8, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -195,7 +195,7 @@
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT: vmv.s.x v9, a0
; RV32-NEXT: vmv.v.i v8, 0
-; RV32-NEXT: vsetivli zero, 3, e32, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 3, e32, m1, tu, mu
; RV32-NEXT: vslideup.vi v8, v9, 2
; RV32-NEXT: lui a0, %hi(.LCPI12_0)
; RV32-NEXT: addi a0, a0, %lo(.LCPI12_0)
@@ -219,7 +219,7 @@
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT: vmv.s.x v9, a0
; RV32-NEXT: vmv.v.i v8, 0
-; RV32-NEXT: vsetivli zero, 3, e32, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 3, e32, m1, tu, mu
; RV32-NEXT: vslideup.vi v8, v9, 2
; RV32-NEXT: lui a0, %hi(.LCPI13_0)
; RV32-NEXT: addi a0, a0, %lo(.LCPI13_0)
@@ -312,7 +312,7 @@
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vmv.s.x v8,
zero ; CHECK-NEXT: vmv.v.i v9, 8 -; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 3 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vse16.v v9, (a0) @@ -549,13 +549,14 @@ ; RV32-NEXT: vse32.v v8, (a3) ; RV32-NEXT: vse32.v v8, (a4) ; RV32-NEXT: vmv.s.x v8, zero -; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, m1, tu, mu ; RV32-NEXT: vslideup.vi v9, v8, 1 ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vse32.v v9, (a5) ; RV32-NEXT: li a0, 1 ; RV32-NEXT: vmv.s.x v8, a0 ; RV32-NEXT: vmv.v.i v9, 0 +; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; RV32-NEXT: vslideup.vi v9, v8, 3 ; RV32-NEXT: vse32.v v9, (a6) ; RV32-NEXT: ret @@ -572,13 +573,14 @@ ; RV64-NEXT: vse32.v v8, (a3) ; RV64-NEXT: vse32.v v8, (a4) ; RV64-NEXT: vmv.s.x v8, zero -; RV64-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, m1, tu, mu ; RV64-NEXT: vslideup.vi v9, v8, 1 ; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV64-NEXT: vse32.v v9, (a5) ; RV64-NEXT: li a0, 1 ; RV64-NEXT: vmv.s.x v8, a0 ; RV64-NEXT: vmv.v.i v9, 0 +; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; RV64-NEXT: vslideup.vi v9, v8, 3 ; RV64-NEXT: vse32.v v9, (a6) ; RV64-NEXT: ret @@ -610,12 +612,13 @@ ; CHECK-NEXT: li a0, 3 ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v10, 4 -; CHECK-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v10, v8, 1 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vse16.v v10, (a5) ; CHECK-NEXT: li a0, 4 ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 3 ; CHECK-NEXT: vse16.v v9, (a6) ; CHECK-NEXT: ret @@ -752,7 +755,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: vsetivli zero, 7, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 7, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 6 ; CHECK-NEXT: ret ret <16 x i8> diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll @@ -217,7 +217,7 @@ ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vse8.v v8, (a1) ; LMULMAX1-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll @@ -192,37 +192,41 @@ ; RV32-NEXT: li a0, 5 ; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV32-NEXT: vmv.s.x v16, a0 +; RV32-NEXT: vmv.v.i v20, 2 +; RV32-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV32-NEXT: vslideup.vi v20, v16, 7 ; RV32-NEXT: lui a0, %hi(.LCPI11_0) ; RV32-NEXT: addi a0, a0, %lo(.LCPI11_0) -; RV32-NEXT: vle16.v v20, (a0) -; RV32-NEXT: vmv.v.i v21, 2 -; RV32-NEXT: vslideup.vi v21, v16, 7 +; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV32-NEXT: vle16.v v21, (a0) ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; RV32-NEXT: vrgatherei16.vv v16, v8, v20 +; 
RV32-NEXT: vrgatherei16.vv v16, v8, v21 ; RV32-NEXT: li a0, 164 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vmv.s.x v0, a0 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vrgatherei16.vv v16, v12, v21, v0.t +; RV32-NEXT: vrgatherei16.vv v16, v12, v20, v0.t ; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_shuffle_vv_v8i64: ; RV64: # %bb.0: +; RV64-NEXT: li a0, 5 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vmv.s.x v16, a0 +; RV64-NEXT: vmv.v.i v20, 2 +; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; RV64-NEXT: vslideup.vi v20, v16, 7 ; RV64-NEXT: lui a0, %hi(.LCPI11_0) ; RV64-NEXT: addi a0, a0, %lo(.LCPI11_0) -; RV64-NEXT: vle64.v v20, (a0) -; RV64-NEXT: li a0, 5 -; RV64-NEXT: vmv.s.x v16, a0 -; RV64-NEXT: vmv.v.i v24, 2 -; RV64-NEXT: vslideup.vi v24, v16, 7 -; RV64-NEXT: vrgather.vv v16, v8, v20 +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vle64.v v24, (a0) +; RV64-NEXT: vrgather.vv v16, v8, v24 ; RV64-NEXT: li a0, 164 ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vrgather.vv v16, v12, v24, v0.t +; RV64-NEXT: vrgather.vv v16, v12, v20, v0.t ; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %s = shufflevector <8 x i64> %x, <8 x i64> %y, <8 x i32> @@ -358,7 +362,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vmv.v.i v10, 4 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v10, v9, 1 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -459,9 +463,10 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vmv.v.i v11, 0 -; CHECK-NEXT: vsetivli zero, 3, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 3, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v11, v10, 2 ; CHECK-NEXT: li a0, 70 +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v10, v8, 2 @@ -479,7 +484,7 @@ ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmv.s.x v10, a0 ; RV32-NEXT: vmv.v.i v11, 0 -; RV32-NEXT: vsetivli zero, 6, e8, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 6, e8, mf2, tu, mu ; RV32-NEXT: vslideup.vi v11, v10, 5 ; RV32-NEXT: lui a0, 8256 ; RV32-NEXT: addi a0, a0, 2 @@ -499,7 +504,7 @@ ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vmv.v.i v11, 0 -; RV64-NEXT: vsetivli zero, 6, e8, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 6, e8, mf2, tu, mu ; RV64-NEXT: vslideup.vi v11, v10, 5 ; RV64-NEXT: lui a0, 8256 ; RV64-NEXT: addiw a0, a0, 2 @@ -521,7 +526,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 3 @@ -553,7 +558,7 @@ define <4 x i16> @slideup_v4i16(<4 x i16> %x) { ; CHECK-LABEL: slideup_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -564,9 +569,9 @@ define <8 x i32> @slideup_v8i32(<8 x i32> %x) { ; CHECK-LABEL: slideup_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, 
mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vi v10, v8, 3 -; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret %s = shufflevector <8 x i32> %x, <8 x i32> poison, <8 x i32> ret <8 x i32> %s @@ -577,9 +582,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v9, v8, 2 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 6 -; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret %s = shufflevector <8 x i16> %x, <8 x i16> poison, <8 x i32> ret <8 x i16> %s @@ -590,9 +595,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 3, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v10, v8, 5 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vi v10, v8, 3 -; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret %s = shufflevector <8 x i32> %x, <8 x i32> poison, <8 x i32> ret <8 x i32> %s @@ -603,7 +608,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 6 ; CHECK-NEXT: ret %s = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> @@ -615,7 +620,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 3, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 5 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %s = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll @@ -951,7 +951,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v10, 3 ; CHECK-NEXT: vmerge.vim v10, v10, 2, v0 -; CHECK-NEXT: vsetivli zero, 7, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 7, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v10, v9, 6 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v11, 0 @@ -959,7 +959,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vmv.s.x v12, a1 -; CHECK-NEXT: vsetivli zero, 7, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 7, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v11, v9, 6 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: lui a1, %hi(.LCPI53_0) @@ -987,7 +987,7 @@ ; CHECK-NEXT: lui a1, 524288 ; CHECK-NEXT: vmv.s.x v9, a1 ; CHECK-NEXT: vmv.v.i v10, 0 -; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v10, v9, 2 ; CHECK-NEXT: lui a1, %hi(.LCPI54_0) ; CHECK-NEXT: addi a1, a1, %lo(.LCPI54_0) @@ -1000,7 +1000,9 @@ ; CHECK-NEXT: li a1, 1 ; CHECK-NEXT: vmv.s.x v9, a1 ; CHECK-NEXT: vmv.v.i v10, 2 +; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v10, v9, 3 +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -1215,7 +1217,7 @@ ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vmv.s.x v8, a1 ; RV32-NEXT: vmv.v.i v9, 0 -; RV32-NEXT: vsetivli zero, 3, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 3, e32, m1, tu, mu ; RV32-NEXT: 
vslideup.vi v9, v8, 2 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vsra.vv v8, v10, v9 @@ -4209,7 +4211,7 @@ ; LMULMAX1-RV32-NEXT: lui a2, 524288 ; LMULMAX1-RV32-NEXT: vmv.s.x v10, a2 ; LMULMAX1-RV32-NEXT: vmv.v.i v11, 0 -; LMULMAX1-RV32-NEXT: vsetivli zero, 3, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 3, e32, m1, tu, mu ; LMULMAX1-RV32-NEXT: vslideup.vi v11, v10, 2 ; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI131_0) ; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI131_0) @@ -4222,7 +4224,9 @@ ; LMULMAX1-RV32-NEXT: li a2, 1 ; LMULMAX1-RV32-NEXT: vmv.s.x v12, a2 ; LMULMAX1-RV32-NEXT: vmv.v.i v13, 2 +; LMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; LMULMAX1-RV32-NEXT: vslideup.vi v13, v12, 3 +; LMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; LMULMAX1-RV32-NEXT: vsrl.vv v9, v9, v13 ; LMULMAX1-RV32-NEXT: vmulhu.vv v10, v8, v10 ; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v10 @@ -4269,7 +4273,7 @@ ; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-RV32-NEXT: vmv.s.x v12, a1 ; LMULMAX2-RV32-NEXT: vmv.v.i v14, 0 -; LMULMAX2-RV32-NEXT: vsetivli zero, 6, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 6, e32, m2, tu, mu ; LMULMAX2-RV32-NEXT: vslideup.vi v14, v12, 5 ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-RV32-NEXT: vmulhu.vv v8, v8, v14 @@ -4291,7 +4295,7 @@ ; LMULMAX2-RV64-NEXT: slli a1, a1, 63 ; LMULMAX2-RV64-NEXT: vmv.s.x v10, a1 ; LMULMAX2-RV64-NEXT: vmv.v.i v12, 0 -; LMULMAX2-RV64-NEXT: vsetivli zero, 3, e64, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 3, e64, m2, tu, mu ; LMULMAX2-RV64-NEXT: vslideup.vi v12, v10, 2 ; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI132_0) ; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI132_0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll @@ -667,6 +667,7 @@ ; RV32-LMULMAX4-NEXT: lui a0, 748384 ; RV32-LMULMAX4-NEXT: addi a0, a0, 1776 ; RV32-LMULMAX4-NEXT: vmv.s.x v0, a0 +; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; RV32-LMULMAX4-NEXT: vslideup.vi v0, v8, 1 ; RV32-LMULMAX4-NEXT: ret ; @@ -687,6 +688,7 @@ ; RV32-LMULMAX8-NEXT: lui a0, 748384 ; RV32-LMULMAX8-NEXT: addi a0, a0, 1776 ; RV32-LMULMAX8-NEXT: vmv.s.x v0, a0 +; RV32-LMULMAX8-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 1 ; RV32-LMULMAX8-NEXT: ret ; @@ -788,13 +790,16 @@ ; RV32-LMULMAX4-NEXT: lui a0, 748384 ; RV32-LMULMAX4-NEXT: addi a0, a0, 1776 ; RV32-LMULMAX4-NEXT: vmv.s.x v0, a0 +; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; RV32-LMULMAX4-NEXT: vslideup.vi v0, v8, 1 ; RV32-LMULMAX4-NEXT: lui a0, 945060 ; RV32-LMULMAX4-NEXT: addi a0, a0, -1793 +; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV32-LMULMAX4-NEXT: vmv.s.x v9, a0 ; RV32-LMULMAX4-NEXT: lui a0, 551776 ; RV32-LMULMAX4-NEXT: addi a0, a0, 1776 ; RV32-LMULMAX4-NEXT: vmv.s.x v8, a0 +; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; RV32-LMULMAX4-NEXT: vslideup.vi v8, v9, 1 ; RV32-LMULMAX4-NEXT: ret ; @@ -818,17 +823,19 @@ ; RV32-LMULMAX8-NEXT: lui a0, 748384 ; RV32-LMULMAX8-NEXT: addi a0, a0, 1776 ; RV32-LMULMAX8-NEXT: vmv.s.x v0, a0 -; RV32-LMULMAX8-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV32-LMULMAX8-NEXT: vsetivli zero, 2, e32, m1, tu, mu ; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 1 ; RV32-LMULMAX8-NEXT: lui a0, 551776 ; RV32-LMULMAX8-NEXT: addi a0, a0, 1776 +; 
RV32-LMULMAX8-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV32-LMULMAX8-NEXT: vmv.s.x v8, a0 -; RV32-LMULMAX8-NEXT: vsetivli zero, 3, e32, m1, ta, mu +; RV32-LMULMAX8-NEXT: vsetivli zero, 3, e32, m1, tu, mu ; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 2 ; RV32-LMULMAX8-NEXT: lui a0, 945060 ; RV32-LMULMAX8-NEXT: addi a0, a0, -1793 +; RV32-LMULMAX8-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV32-LMULMAX8-NEXT: vmv.s.x v8, a0 -; RV32-LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 3 ; RV32-LMULMAX8-NEXT: ret ; @@ -841,6 +848,7 @@ ; RV64-LMULMAX8-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-LMULMAX8-NEXT: vmv.s.x v8, a0 ; RV64-LMULMAX8-NEXT: vmv.s.x v0, a1 +; RV64-LMULMAX8-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; RV64-LMULMAX8-NEXT: vslideup.vi v0, v8, 1 ; RV64-LMULMAX8-NEXT: ret ret <128 x i1> diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll @@ -13,7 +13,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -33,7 +33,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -53,7 +53,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll @@ -13,7 +13,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -32,7 +32,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -53,7 +53,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -77,7 +77,7 @@ ; CHECK-NEXT: 
vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -99,7 +99,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 @@ -120,7 +120,7 @@ ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll @@ -2178,9 +2178,9 @@ ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: li a0, 32 -; RV64-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, m2, tu, mu ; RV64-NEXT: vslideup.vi v12, v10, 16 -; RV64-NEXT: vmv.v.v v8, v12 +; RV64-NEXT: vmv2r.v v8, v12 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, <32 x i8> %idxs %v = call <32 x i8> @llvm.masked.gather.v32i8.v32p0i8(<32 x i8*> %ptrs, i32 2, <32 x i1> %m, <32 x i8> %passthru) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll @@ -56,7 +56,7 @@ ; RV32-NEXT: vmerge.vim v10, v10, 1, v0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmv.v.i v11, 0 -; RV32-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; RV32-NEXT: vslideup.vi v11, v10, 0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmsne.vi v10, v11, 0 @@ -87,6 +87,7 @@ ; RV32-NEXT: or a0, a1, a0 ; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32-NEXT: vmv.s.x v8, a0 +; RV32-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; RV32-NEXT: vslideup.vi v9, v8, 1 ; RV32-NEXT: .LBB4_4: # %else2 ; RV32-NEXT: vmv1r.v v8, v9 @@ -102,7 +103,7 @@ ; RV64-NEXT: vmerge.vim v10, v10, 1, v0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmv.v.i v11, 0 -; RV64-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; RV64-NEXT: vslideup.vi v11, v10, 0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmsne.vi v10, v11, 0 @@ -133,6 +134,7 @@ ; RV64-NEXT: or a0, a1, a0 ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64-NEXT: vmv.s.x v8, a0 +; RV64-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; RV64-NEXT: vslideup.vi v9, v8, 1 ; RV64-NEXT: .LBB4_4: # %else2 ; RV64-NEXT: vmv1r.v v8, v9 @@ -154,7 +156,7 @@ ; RV32-NEXT: vmerge.vim v10, v10, 1, v0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmv.v.i v11, 0 -; RV32-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; RV32-NEXT: vslideup.vi v11, v10, 0 ; 
RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmsne.vi v10, v11, 0 @@ -171,7 +173,7 @@ ; RV32-NEXT: lw a1, 0(a1) ; RV32-NEXT: vslide1up.vx v11, v10, a2 ; RV32-NEXT: vslide1up.vx v12, v11, a1 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, mu ; RV32-NEXT: vslideup.vi v9, v12, 0 ; RV32-NEXT: .LBB5_2: # %else ; RV32-NEXT: andi a0, a0, 2 @@ -185,7 +187,7 @@ ; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; RV32-NEXT: vslide1up.vx v8, v10, a1 ; RV32-NEXT: vslide1up.vx v10, v8, a0 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, tu, mu ; RV32-NEXT: vslideup.vi v9, v10, 1 ; RV32-NEXT: .LBB5_4: # %else2 ; RV32-NEXT: vmv1r.v v8, v9 @@ -201,7 +203,7 @@ ; RV64-NEXT: vmerge.vim v10, v10, 1, v0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmv.v.i v11, 0 -; RV64-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; RV64-NEXT: vslideup.vi v11, v10, 0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmsne.vi v10, v11, 0 @@ -231,7 +233,7 @@ ; RV64-NEXT: slli a1, a1, 32 ; RV64-NEXT: or a0, a1, a0 ; RV64-NEXT: vmv.s.x v8, a0 -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, tu, mu ; RV64-NEXT: vslideup.vi v9, v8, 1 ; RV64-NEXT: .LBB5_4: # %else2 ; RV64-NEXT: vmv1r.v v8, v9 @@ -253,7 +255,7 @@ ; RV32-NEXT: vmerge.vim v10, v10, 1, v0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmv.v.i v11, 0 -; RV32-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; RV32-NEXT: vslideup.vi v11, v10, 0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmsne.vi v10, v11, 0 @@ -330,7 +332,7 @@ ; RV64-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmv.v.i v12, 0 -; RV64-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; RV64-NEXT: vslideup.vi v12, v9, 0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmsne.vi v9, v12, 0 @@ -413,7 +415,7 @@ ; RV32-NEXT: vmerge.vim v10, v10, 1, v0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmv.v.i v11, 0 -; RV32-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; RV32-NEXT: vslideup.vi v11, v10, 0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmsne.vi v10, v11, 0 @@ -458,7 +460,7 @@ ; RV64-NEXT: vmerge.vim v10, v10, 1, v0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmv.v.i v11, 0 -; RV64-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; RV64-NEXT: vslideup.vi v11, v10, 0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmsne.vi v10, v11, 0 @@ -512,7 +514,7 @@ ; RV32-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmv.v.i v9, 0 -; RV32-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; RV32-NEXT: vslideup.vi v9, v8, 0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmsne.vi v8, v9, 0 @@ -554,8 +556,10 @@ ; RV32-NEXT: slli a0, a0, 16 ; RV32-NEXT: or a0, a0, a2 ; RV32-NEXT: vmv.s.x v9, a0 +; RV32-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; RV32-NEXT: vslideup.vi v8, v9, 1 ; RV32-NEXT: .LBB8_4: # %else2 +; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV32-NEXT: vse32.v v8, (a1) ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -570,7 +574,7 @@ ; RV64-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, 
mu ; RV64-NEXT: vmv.v.i v9, 0 -; RV64-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; RV64-NEXT: vslideup.vi v9, v8, 0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmsne.vi v8, v9, 0 @@ -612,8 +616,10 @@ ; RV64-NEXT: slli a0, a0, 16 ; RV64-NEXT: or a0, a0, a2 ; RV64-NEXT: vmv.s.x v9, a0 +; RV64-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; RV64-NEXT: vslideup.vi v8, v9, 1 ; RV64-NEXT: .LBB8_4: # %else2 +; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV64-NEXT: vse32.v v8, (a1) ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -636,7 +642,7 @@ ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v10, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v10, v9, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v9, v10, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll @@ -9,7 +9,7 @@ define <2 x half> @vfma_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu ; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -32,7 +32,7 @@ define <2 x half> @vfma_vf_v2f16(<2 x half> %va, half %b, <2 x half> %vc, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> poison, half %b, i32 0 @@ -60,7 +60,7 @@ define <4 x half> @vfma_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu ; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -83,7 +83,7 @@ define <4 x half> @vfma_vf_v4f16(<4 x half> %va, half %b, <4 x half> %vc, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> poison, half %b, i32 0 @@ -111,9 +111,9 @@ define <8 x half> @vfma_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu ; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t -; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %va, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 %evl) ret <8 x half> %v @@ -134,7 +134,7 @@ define <8 x half> @vfma_vf_v8f16(<8 x half> %va, half %b, <8 x half> %vc, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: 
ret %elt.head = insertelement <8 x half> poison, half %b, i32 0 @@ -162,9 +162,9 @@ define <16 x half> @vfma_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu ; CHECK-NEXT: vfmadd.vv v10, v8, v12, v0.t -; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %va, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 %evl) ret <16 x half> %v @@ -185,7 +185,7 @@ define <16 x half> @vfma_vf_v16f16(<16 x half> %va, half %b, <16 x half> %vc, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> poison, half %b, i32 0 @@ -213,7 +213,7 @@ define <2 x float> @vfma_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu ; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -236,7 +236,7 @@ define <2 x float> @vfma_vf_v2f32(<2 x float> %va, float %b, <2 x float> %vc, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> poison, float %b, i32 0 @@ -264,9 +264,9 @@ define <4 x float> @vfma_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t -; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %va, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 %evl) ret <4 x float> %v @@ -287,7 +287,7 @@ define <4 x float> @vfma_vf_v4f32(<4 x float> %va, float %b, <4 x float> %vc, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> poison, float %b, i32 0 @@ -315,9 +315,9 @@ define <8 x float> @vfma_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu ; CHECK-NEXT: vfmadd.vv v10, v8, v12, v0.t -; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %va, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 %evl) ret <8 x float> %v @@ -338,7 +338,7 @@ define <8 x float> @vfma_vf_v8f32(<8 x float> %va, float %b, <8 x float> %vc, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu ; CHECK-NEXT: vfmadd.vf 
v8, fa0, v10, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> poison, float %b, i32 0 @@ -366,9 +366,9 @@ define <16 x float> @vfma_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu ; CHECK-NEXT: vfmadd.vv v12, v8, v16, v0.t -; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %va, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 %evl) ret <16 x float> %v @@ -389,7 +389,7 @@ define <16 x float> @vfma_vf_v16f32(<16 x float> %va, float %b, <16 x float> %vc, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> poison, float %b, i32 0 @@ -417,9 +417,9 @@ define <2 x double> @vfma_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu ; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t -; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %va, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 %evl) ret <2 x double> %v @@ -440,7 +440,7 @@ define <2 x double> @vfma_vf_v2f64(<2 x double> %va, double %b, <2 x double> %vc, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> poison, double %b, i32 0 @@ -468,9 +468,9 @@ define <4 x double> @vfma_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu ; CHECK-NEXT: vfmadd.vv v10, v8, v12, v0.t -; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %va, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 %evl) ret <4 x double> %v @@ -491,7 +491,7 @@ define <4 x double> @vfma_vf_v4f64(<4 x double> %va, double %b, <4 x double> %vc, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> poison, double %b, i32 0 @@ -519,9 +519,9 @@ define <8 x double> @vfma_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu ; CHECK-NEXT: vfmadd.vv v12, v8, v16, v0.t -; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %va, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 %evl) ret <8 x double> %v @@ -542,7 +542,7 @@ define <8 x double> 
@vfma_vf_v8f64(<8 x double> %va, double %b, <8 x double> %vc, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> poison, double %b, i32 0 @@ -572,9 +572,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vle64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu ; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t -; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret %v = call <15 x double> @llvm.vp.fma.v15f64(<15 x double> %va, <15 x double> %b, <15 x double> %c, <15 x i1> %m, i32 %evl) ret <15 x double> %v @@ -601,9 +601,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vle64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu ; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t -; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret %v = call <16 x double> @llvm.vp.fma.v16f64(<16 x double> %va, <16 x double> %b, <16 x double> %c, <16 x i1> %m, i32 %evl) ret <16 x double> %v @@ -626,7 +626,7 @@ define <16 x double> @vfma_vf_v16f64(<16 x double> %va, double %b, <16 x double> %vc, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> poison, double %b, i32 0 @@ -705,7 +705,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: li a2, 24 @@ -735,7 +735,7 @@ ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a4, 16 ; CHECK-NEXT: .LBB50_4: -; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a4, e64, m8, tu, mu ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 5 @@ -750,7 +750,7 @@ ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t -; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 40 ; CHECK-NEXT: mul a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll @@ -311,7 +311,7 @@ ; RV64-NEXT: vmv1r.v v0, v10 ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: li a0, 32 -; RV64-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, m2, tu, mu ; RV64-NEXT: vslideup.vi v8, v12, 16 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, <32 x i8> %idxs diff --git a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll @@ -61,7 +61,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli zero, a0, e8, 
mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv4i8( %vec, %subvec, i64 0) @@ -76,7 +76,7 @@ ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a1, a1, a0 ; CHECK-NEXT: add a0, a1, a0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv4i8( %vec, %subvec, i64 3) @@ -214,7 +214,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i32.nxv16i32( %vec, %subvec, i64 0) @@ -227,7 +227,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: add a1, a0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i32.nxv16i32( %vec, %subvec, i64 1) @@ -239,7 +239,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v11, v16, 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i32.nxv16i32( %vec, %subvec, i64 6) @@ -251,7 +251,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 0) @@ -264,7 +264,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: add a1, a0, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 1) @@ -278,7 +278,7 @@ ; CHECK-NEXT: srli a1, a0, 3 ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: add a1, a0, a1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 2) @@ -293,7 +293,7 @@ ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a1, a1, a0 ; CHECK-NEXT: add a0, a1, a0 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 3) @@ -307,7 +307,7 @@ ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: slli a1, a0, 3 ; CHECK-NEXT: sub a0, a1, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 7) @@ -321,7 +321,7 @@ ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: slli a1, a0, 3 ; CHECK-NEXT: sub a0, a1, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v9, v10, a0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, 
%subvec, i64 15) @@ -333,7 +333,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2f16.nxv32f16( %vec, %subvec, i64 0) @@ -346,7 +346,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: add a1, a0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2f16.nxv32f16( %vec, %subvec, i64 2) @@ -359,7 +359,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: add a1, a0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vx v14, v16, a0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2f16.nxv32f16( %vec, %subvec, i64 26) @@ -382,7 +382,7 @@ ; CHECK-NEXT: srli a1, a0, 3 ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: add a1, a0, a1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vx v22, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -395,7 +395,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v0, v8, 0 ; CHECK-NEXT: ret %vec = call @llvm.experimental.vector.insert.nxv8i1.nxv32i1( %v, %sv, i64 0) @@ -408,7 +408,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: add a1, a0, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v0, v8, a0 ; CHECK-NEXT: ret %vec = call @llvm.experimental.vector.insert.nxv8i1.nxv32i1( %v, %sv, i64 8) @@ -427,7 +427,7 @@ ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v0, v9, 0 @@ -450,7 +450,7 @@ ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v9, v8, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v0, v9, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll @@ -17,7 +17,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -30,7 +30,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -52,7 +52,7 @@ ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -65,7 +65,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -87,7 +87,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -100,7 +100,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -122,7 +122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -135,7 +135,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -157,7 +157,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -170,7 +170,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -192,7 +192,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -205,7 +205,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -227,7 +227,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -240,7 +240,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; 
CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -262,7 +262,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -275,7 +275,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -310,7 +310,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -332,7 +332,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -345,7 +345,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -367,7 +367,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -380,7 +380,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -402,7 +402,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 @@ -415,7 +415,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx @@ -437,7 +437,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: 
vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 @@ -450,7 +450,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx @@ -472,7 +472,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 @@ -485,7 +485,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx @@ -507,7 +507,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetivli zero, 4, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 @@ -520,7 +520,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll @@ -17,7 +17,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -30,7 +30,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -52,7 +52,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -65,7 +65,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -87,7 +87,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = 
insertelement %v, half %elt, i32 3 @@ -100,7 +100,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -122,7 +122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -135,7 +135,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -157,7 +157,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -170,7 +170,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -192,7 +192,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -205,7 +205,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -227,7 +227,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -240,7 +240,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -262,7 +262,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -275,7 +275,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ 
-297,7 +297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -310,7 +310,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -332,7 +332,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -345,7 +345,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -367,7 +367,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -380,7 +380,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -402,7 +402,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 @@ -415,7 +415,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx @@ -437,7 +437,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 @@ -450,7 +450,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx @@ -472,7 +472,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 @@ -485,7 +485,7 @@ ; CHECK-NEXT: vsetvli a1, 
zero, e64, m4, ta, mu ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx @@ -507,7 +507,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetivli zero, 4, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 @@ -520,7 +520,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll @@ -9,7 +9,7 @@ ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 3, e8, mf8, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 2 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 @@ -27,7 +27,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu ; CHECK-NEXT: vslideup.vx v9, v8, a1 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 @@ -44,7 +44,7 @@ ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 3, e8, mf4, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 2 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 @@ -62,7 +62,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu ; CHECK-NEXT: vslideup.vx v9, v8, a1 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 @@ -79,7 +79,7 @@ ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 3, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 2 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 @@ -97,7 +97,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v9, v8, a1 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 @@ -114,7 +114,7 @@ ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 3, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 2 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 @@ -132,7 +132,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: 
vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v9, v8, a1 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 @@ -149,7 +149,7 @@ ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmerge.vim v10, v10, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, e8, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 3, e8, m2, tu, mu ; CHECK-NEXT: vslideup.vi v10, v8, 2 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v10, 1 @@ -167,7 +167,7 @@ ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmerge.vim v10, v10, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu ; CHECK-NEXT: vslideup.vx v10, v8, a1 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v10, 1 @@ -184,7 +184,7 @@ ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vmerge.vim v12, v12, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, e8, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 3, e8, m4, tu, mu ; CHECK-NEXT: vslideup.vi v12, v8, 2 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v12, 1 @@ -202,7 +202,7 @@ ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vmerge.vim v12, v12, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu ; CHECK-NEXT: vslideup.vx v12, v8, a1 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v12, 1 @@ -219,7 +219,7 @@ ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vmerge.vim v16, v16, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 3, e8, m8, tu, mu ; CHECK-NEXT: vslideup.vi v16, v8, 2 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v16, 1 @@ -237,7 +237,7 @@ ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vmerge.vim v16, v16, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu ; CHECK-NEXT: vslideup.vx v16, v8, a1 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v16, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll @@ -17,7 +17,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -30,7 +30,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -52,7 +52,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -65,7 +65,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, 
mf4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -87,7 +87,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -100,7 +100,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -122,7 +122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -135,7 +135,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -157,7 +157,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -170,7 +170,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -192,7 +192,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -205,7 +205,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -227,7 +227,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -240,7 +240,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -262,7 +262,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -275,7 +275,7 @@ ; 
CHECK-NEXT: vsetvli a2, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -310,7 +310,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -332,7 +332,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -345,7 +345,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -367,7 +367,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -380,7 +380,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -402,7 +402,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -415,7 +415,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -437,7 +437,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -450,7 +450,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -472,7 +472,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; 
CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -485,7 +485,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -507,7 +507,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -520,7 +520,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -542,7 +542,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -555,7 +555,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -577,7 +577,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -590,7 +590,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -612,7 +612,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -625,7 +625,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -639,7 +639,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vslide1up.vx v10, v9, a1 ; CHECK-NEXT: vslide1up.vx v9, v10, a0 -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 @@ -653,7 +653,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vslide1up.vx v10, v9, a1 ; CHECK-NEXT: vslide1up.vx v9, v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, 
mu +; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -668,7 +668,7 @@ ; CHECK-NEXT: vslide1up.vx v10, v9, a1 ; CHECK-NEXT: vslide1up.vx v9, v10, a0 ; CHECK-NEXT: addi a0, a2, 1 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a2 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -682,7 +682,7 @@ ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vslide1up.vx v12, v10, a1 ; CHECK-NEXT: vslide1up.vx v10, v12, a0 -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 @@ -696,7 +696,7 @@ ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vslide1up.vx v12, v10, a1 ; CHECK-NEXT: vslide1up.vx v10, v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -711,7 +711,7 @@ ; CHECK-NEXT: vslide1up.vx v12, v10, a1 ; CHECK-NEXT: vslide1up.vx v10, v12, a0 ; CHECK-NEXT: addi a0, a2, 1 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a2 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -725,7 +725,7 @@ ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vslide1up.vx v16, v12, a1 ; CHECK-NEXT: vslide1up.vx v12, v16, a0 -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 @@ -739,7 +739,7 @@ ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vslide1up.vx v16, v12, a1 ; CHECK-NEXT: vslide1up.vx v12, v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -754,7 +754,7 @@ ; CHECK-NEXT: vslide1up.vx v16, v12, a1 ; CHECK-NEXT: vslide1up.vx v12, v16, a0 ; CHECK-NEXT: addi a0, a2, 1 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a2 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -768,7 +768,7 @@ ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vslide1up.vx v24, v16, a1 ; CHECK-NEXT: vslide1up.vx v16, v24, a0 -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 @@ -782,7 +782,7 @@ ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vslide1up.vx v24, v16, a1 ; CHECK-NEXT: vslide1up.vx v16, v24, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -797,7 +797,7 @@ ; CHECK-NEXT: vslide1up.vx v24, v16, a1 ; CHECK-NEXT: vslide1up.vx v16, v24, a0 ; CHECK-NEXT: addi a0, a2, 1 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a2 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -822,7 +822,7 @@ ; CHECK-NEXT: li a0, 10 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: 
vsetivli zero, 4, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 10, i32 3 @@ -836,7 +836,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a1 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 10, i32 %idx @@ -860,7 +860,7 @@ ; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 -1, i32 3 @@ -874,7 +874,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a1 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 -1, i32 %idx diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll @@ -17,7 +17,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -30,7 +30,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -52,7 +52,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -65,7 +65,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -87,7 +87,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -100,7 +100,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -122,7 +122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -135,7 +135,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu ; CHECK-NEXT: 
vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -157,7 +157,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -170,7 +170,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -192,7 +192,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -205,7 +205,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -227,7 +227,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -240,7 +240,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -262,7 +262,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -275,7 +275,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -310,7 +310,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -332,7 +332,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; CHECK-NEXT: 
vsetivli zero, 4, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -345,7 +345,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -367,7 +367,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -380,7 +380,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -402,7 +402,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -415,7 +415,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -437,7 +437,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -450,7 +450,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e16, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -472,7 +472,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -485,7 +485,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -507,7 +507,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -520,7 +520,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: 
ret %r = insertelement %v, i32 %elt, i32 %idx @@ -542,7 +542,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -555,7 +555,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -577,7 +577,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -590,7 +590,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -612,7 +612,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -625,7 +625,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -647,7 +647,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -661,7 +661,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: sext.w a0, a1 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -683,7 +683,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -697,7 +697,7 @@ ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: sext.w a0, a1 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -719,7 +719,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -733,7 +733,7 @@ ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: sext.w a0, a1 ; 
CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -755,7 +755,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -769,7 +769,7 @@ ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: sext.w a0, a1 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll @@ -2521,7 +2521,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: srli a0, a0, 3 ; RV32-NEXT: add a1, a0, a0 -; RV32-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; RV32-NEXT: vslideup.vx v0, v24, a0 ; RV32-NEXT: ret ; @@ -2534,7 +2534,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: srli a0, a0, 3 ; RV64-NEXT: add a1, a0, a0 -; RV64-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; RV64-NEXT: vslideup.vx v0, v24, a0 ; RV64-NEXT: ret %vc = fcmp oeq %va, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll --- a/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll @@ -3235,7 +3235,7 @@ ; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu ; CHECK-NEXT: vmseq.vi v24, v16, 0 ; CHECK-NEXT: vmseq.vi v0, v8, 0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vslideup.vx v0, v24, a0 ; CHECK-NEXT: ret %vc = icmp eq %va, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll --- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll @@ -3693,7 +3693,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vse32.v v8, (a0) @@ -3736,7 +3736,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vse32.v v8, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll --- a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll @@ -21,7 +21,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call 
@llvm.experimental.vector.splice.nxv1i8( %a, %b, i32 -1) @@ -36,7 +36,7 @@ ; CHECK-NEXT: addi a0, a0, -2 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i8( %a, %b, i32 -2) @@ -51,7 +51,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i8( %a, %b, i32 1) @@ -76,7 +76,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i8( %a, %b, i32 -1) @@ -91,7 +91,7 @@ ; CHECK-NEXT: addi a0, a0, -4 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i8( %a, %b, i32 -4) @@ -106,7 +106,7 @@ ; CHECK-NEXT: addi a0, a0, -3 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i8( %a, %b, i32 3) @@ -131,7 +131,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i8( %a, %b, i32 -1) @@ -146,7 +146,7 @@ ; CHECK-NEXT: addi a0, a0, -8 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i8( %a, %b, i32 -8) @@ -161,7 +161,7 @@ ; CHECK-NEXT: addi a0, a0, -7 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i8( %a, %b, i32 7) @@ -185,7 +185,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i8( %a, %b, i32 -1) @@ -199,7 +199,7 @@ ; CHECK-NEXT: addi a0, a0, -16 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 16 ; CHECK-NEXT: ret 
%res = call @llvm.experimental.vector.splice.nxv8i8( %a, %b, i32 -16) @@ -213,7 +213,7 @@ ; CHECK-NEXT: addi a0, a0, -15 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i8( %a, %b, i32 15) @@ -238,7 +238,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i8( %a, %b, i32 -1) @@ -254,7 +254,7 @@ ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i8( %a, %b, i32 -32) @@ -269,7 +269,7 @@ ; CHECK-NEXT: addi a0, a0, -31 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 31 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i8( %a, %b, i32 31) @@ -294,7 +294,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32i8( %a, %b, i32 -1) @@ -310,7 +310,7 @@ ; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32i8( %a, %b, i32 -64) @@ -326,7 +326,7 @@ ; CHECK-NEXT: li a1, 63 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a1 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32i8( %a, %b, i32 63) @@ -351,7 +351,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv64i8( %a, %b, i32 -1) @@ -367,7 +367,7 @@ ; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv64i8( %a, %b, i32 -128) @@ -383,7 +383,7 @@ ; CHECK-NEXT: li a1, 127 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a1 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret 
%res = call @llvm.experimental.vector.splice.nxv64i8( %a, %b, i32 127) @@ -408,7 +408,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i16( %a, %b, i32 -1) @@ -423,7 +423,7 @@ ; CHECK-NEXT: addi a0, a0, -2 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i16( %a, %b, i32 -2) @@ -438,7 +438,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i16( %a, %b, i32 1) @@ -463,7 +463,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i16( %a, %b, i32 -1) @@ -478,7 +478,7 @@ ; CHECK-NEXT: addi a0, a0, -4 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i16( %a, %b, i32 -4) @@ -493,7 +493,7 @@ ; CHECK-NEXT: addi a0, a0, -3 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i16( %a, %b, i32 3) @@ -518,7 +518,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i16( %a, %b, i32 -1) @@ -533,7 +533,7 @@ ; CHECK-NEXT: addi a0, a0, -8 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i16( %a, %b, i32 -8) @@ -548,7 +548,7 @@ ; CHECK-NEXT: addi a0, a0, -7 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i16( %a, %b, i32 7) @@ -572,7 +572,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, mu ; 
CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i16( %a, %b, i32 -1) @@ -586,7 +586,7 @@ ; CHECK-NEXT: addi a0, a0, -16 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i16( %a, %b, i32 -16) @@ -600,7 +600,7 @@ ; CHECK-NEXT: addi a0, a0, -15 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i16( %a, %b, i32 15) @@ -625,7 +625,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i16( %a, %b, i32 -1) @@ -641,7 +641,7 @@ ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i16( %a, %b, i32 -32) @@ -656,7 +656,7 @@ ; CHECK-NEXT: addi a0, a0, -31 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 31 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i16( %a, %b, i32 31) @@ -681,7 +681,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32i16( %a, %b, i32 -1) @@ -697,7 +697,7 @@ ; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32i16( %a, %b, i32 -64) @@ -713,7 +713,7 @@ ; CHECK-NEXT: li a1, 63 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a1 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32i16( %a, %b, i32 63) @@ -738,7 +738,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i32( %a, %b, i32 -1) @@ -753,7 +753,7 @@ ; CHECK-NEXT: addi a0, a0, -2 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu 
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i32( %a, %b, i32 -2) @@ -768,7 +768,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i32( %a, %b, i32 1) @@ -793,7 +793,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i32( %a, %b, i32 -1) @@ -808,7 +808,7 @@ ; CHECK-NEXT: addi a0, a0, -4 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i32( %a, %b, i32 -4) @@ -823,7 +823,7 @@ ; CHECK-NEXT: addi a0, a0, -3 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i32( %a, %b, i32 3) @@ -848,7 +848,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i32( %a, %b, i32 -1) @@ -863,7 +863,7 @@ ; CHECK-NEXT: addi a0, a0, -8 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i32( %a, %b, i32 -8) @@ -878,7 +878,7 @@ ; CHECK-NEXT: addi a0, a0, -7 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i32( %a, %b, i32 7) @@ -902,7 +902,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i32( %a, %b, i32 -1) @@ -916,7 +916,7 @@ ; CHECK-NEXT: addi a0, a0, -16 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i32( %a, %b, i32 -16) @@ -930,7 +930,7 @@ ; CHECK-NEXT: addi a0, a0, -15 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; 
CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i32( %a, %b, i32 15) @@ -955,7 +955,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i32( %a, %b, i32 -1) @@ -971,7 +971,7 @@ ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i32( %a, %b, i32 -32) @@ -986,7 +986,7 @@ ; CHECK-NEXT: addi a0, a0, -31 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 31 -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i32( %a, %b, i32 31) @@ -1011,7 +1011,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i64( %a, %b, i32 -1) @@ -1026,7 +1026,7 @@ ; CHECK-NEXT: addi a0, a0, -2 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i64( %a, %b, i32 -2) @@ -1041,7 +1041,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i64( %a, %b, i32 1) @@ -1066,7 +1066,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i64( %a, %b, i32 -1) @@ -1081,7 +1081,7 @@ ; CHECK-NEXT: addi a0, a0, -4 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i64( %a, %b, i32 -4) @@ -1096,7 +1096,7 @@ ; CHECK-NEXT: addi a0, a0, -3 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i64( %a, %b, i32 3) @@ -1121,7 +1121,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, 
e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i64( %a, %b, i32 -1) @@ -1136,7 +1136,7 @@ ; CHECK-NEXT: addi a0, a0, -8 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i64( %a, %b, i32 -8) @@ -1151,7 +1151,7 @@ ; CHECK-NEXT: addi a0, a0, -7 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i64( %a, %b, i32 7) @@ -1175,7 +1175,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i64( %a, %b, i32 -1) @@ -1189,7 +1189,7 @@ ; CHECK-NEXT: addi a0, a0, -16 ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i64( %a, %b, i32 -16) @@ -1203,7 +1203,7 @@ ; CHECK-NEXT: addi a0, a0, -15 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i64( %a, %b, i32 15) @@ -1228,7 +1228,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f16( %a, %b, i32 -1) @@ -1243,7 +1243,7 @@ ; CHECK-NEXT: addi a0, a0, -2 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f16( %a, %b, i32 -2) @@ -1258,7 +1258,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f16( %a, %b, i32 1) @@ -1283,7 +1283,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f16( %a, %b, i32 -1) @@ 
-1298,7 +1298,7 @@ ; CHECK-NEXT: addi a0, a0, -4 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f16( %a, %b, i32 -4) @@ -1313,7 +1313,7 @@ ; CHECK-NEXT: addi a0, a0, -3 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f16( %a, %b, i32 3) @@ -1338,7 +1338,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f16( %a, %b, i32 -1) @@ -1353,7 +1353,7 @@ ; CHECK-NEXT: addi a0, a0, -8 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f16( %a, %b, i32 -8) @@ -1368,7 +1368,7 @@ ; CHECK-NEXT: addi a0, a0, -7 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f16( %a, %b, i32 7) @@ -1392,7 +1392,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f16( %a, %b, i32 -1) @@ -1406,7 +1406,7 @@ ; CHECK-NEXT: addi a0, a0, -16 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f16( %a, %b, i32 -16) @@ -1420,7 +1420,7 @@ ; CHECK-NEXT: addi a0, a0, -15 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f16( %a, %b, i32 15) @@ -1445,7 +1445,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16f16( %a, %b, i32 -1) @@ -1461,7 +1461,7 @@ ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %res = 
call @llvm.experimental.vector.splice.nxv16f16( %a, %b, i32 -32) @@ -1476,7 +1476,7 @@ ; CHECK-NEXT: addi a0, a0, -31 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 31 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16f16( %a, %b, i32 31) @@ -1501,7 +1501,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32f16( %a, %b, i32 -1) @@ -1517,7 +1517,7 @@ ; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32f16( %a, %b, i32 -64) @@ -1533,7 +1533,7 @@ ; CHECK-NEXT: li a1, 63 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a1 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32f16( %a, %b, i32 63) @@ -1558,7 +1558,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f32( %a, %b, i32 -1) @@ -1573,7 +1573,7 @@ ; CHECK-NEXT: addi a0, a0, -2 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f32( %a, %b, i32 -2) @@ -1588,7 +1588,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f32( %a, %b, i32 1) @@ -1613,7 +1613,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f32( %a, %b, i32 -1) @@ -1628,7 +1628,7 @@ ; CHECK-NEXT: addi a0, a0, -4 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f32( %a, %b, i32 -4) @@ -1643,7 +1643,7 @@ ; CHECK-NEXT: addi a0, a0, -3 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, 
tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f32( %a, %b, i32 3) @@ -1668,7 +1668,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f32( %a, %b, i32 -1) @@ -1683,7 +1683,7 @@ ; CHECK-NEXT: addi a0, a0, -8 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f32( %a, %b, i32 -8) @@ -1698,7 +1698,7 @@ ; CHECK-NEXT: addi a0, a0, -7 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f32( %a, %b, i32 7) @@ -1722,7 +1722,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f32( %a, %b, i32 -1) @@ -1736,7 +1736,7 @@ ; CHECK-NEXT: addi a0, a0, -16 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f32( %a, %b, i32 -16) @@ -1750,7 +1750,7 @@ ; CHECK-NEXT: addi a0, a0, -15 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f32( %a, %b, i32 15) @@ -1775,7 +1775,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16f32( %a, %b, i32 -1) @@ -1791,7 +1791,7 @@ ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16f32( %a, %b, i32 -32) @@ -1806,7 +1806,7 @@ ; CHECK-NEXT: addi a0, a0, -31 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 31 -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16f32( %a, %b, i32 31) @@ -1831,7 +1831,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: 
vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f64( %a, %b, i32 -1) @@ -1846,7 +1846,7 @@ ; CHECK-NEXT: addi a0, a0, -2 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f64( %a, %b, i32 -2) @@ -1861,7 +1861,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f64( %a, %b, i32 1) @@ -1886,7 +1886,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f64( %a, %b, i32 -1) @@ -1901,7 +1901,7 @@ ; CHECK-NEXT: addi a0, a0, -4 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f64( %a, %b, i32 -4) @@ -1916,7 +1916,7 @@ ; CHECK-NEXT: addi a0, a0, -3 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f64( %a, %b, i32 3) @@ -1941,7 +1941,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f64( %a, %b, i32 -1) @@ -1956,7 +1956,7 @@ ; CHECK-NEXT: addi a0, a0, -8 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f64( %a, %b, i32 -8) @@ -1971,7 +1971,7 @@ ; CHECK-NEXT: addi a0, a0, -7 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f64( %a, %b, i32 7) @@ -1995,7 +1995,7 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f64( %a, %b, i32 -1) @@ -2009,7 +2009,7 @@ ; CHECK-NEXT: addi a0, a0, -16 ; CHECK-NEXT: vsetivli zero, 16, e64, m8, 
ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f64( %a, %b, i32 -16) @@ -2023,7 +2023,7 @@ ; CHECK-NEXT: addi a0, a0, -15 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f64( %a, %b, i32 15) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll @@ -9,7 +9,7 @@ define @vfma_vv_nxv1f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu ; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -32,7 +32,7 @@ define @vfma_vf_nxv1f16( %va, half %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -44,7 +44,7 @@ define @vfma_vf_nxv1f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv1f16_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -86,7 +86,7 @@ define @vfma_vv_nxv2f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu ; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -109,7 +109,7 @@ define @vfma_vf_nxv2f16( %va, half %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -121,7 +121,7 @@ define @vfma_vf_nxv2f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv2f16_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -163,9 +163,9 @@ define @vfma_vv_nxv4f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu ; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t -; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret %v = call @llvm.vp.fma.nxv4f16( %va, %b, %c, %m, i32 %evl) ret %v @@ -186,7 +186,7 @@ define @vfma_vf_nxv4f16( %va, half %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, 
v9, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -198,7 +198,7 @@ define @vfma_vf_nxv4f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv4f16_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -240,9 +240,9 @@ define @vfma_vv_nxv8f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu ; CHECK-NEXT: vfmadd.vv v10, v8, v12, v0.t -; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret %v = call @llvm.vp.fma.nxv8f16( %va, %b, %c, %m, i32 %evl) ret %v @@ -263,7 +263,7 @@ define @vfma_vf_nxv8f16( %va, half %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -275,7 +275,7 @@ define @vfma_vf_nxv8f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv8f16_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -317,9 +317,9 @@ define @vfma_vv_nxv16f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu ; CHECK-NEXT: vfmadd.vv v12, v8, v16, v0.t -; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret %v = call @llvm.vp.fma.nxv16f16( %va, %b, %c, %m, i32 %evl) ret %v @@ -340,7 +340,7 @@ define @vfma_vf_nxv16f16( %va, half %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -352,7 +352,7 @@ define @vfma_vf_nxv16f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv16f16_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -395,9 +395,9 @@ ; CHECK-LABEL: vfma_vv_nxv32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu ; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t -; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret %v = call @llvm.vp.fma.nxv32f16( %va, %b, %c, %m, i32 %evl) ret %v @@ -419,7 +419,7 @@ define @vfma_vf_nxv32f16( %va, half %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -431,7 +431,7 @@ define @vfma_vf_nxv32f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vfma_vf_nxv32f16_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -473,7 +473,7 @@ define @vfma_vv_nxv1f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu ; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -496,7 +496,7 @@ define @vfma_vf_nxv1f32( %va, float %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -508,7 +508,7 @@ define @vfma_vf_nxv1f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv1f32_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -550,9 +550,9 @@ define @vfma_vv_nxv2f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t -; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret %v = call @llvm.vp.fma.nxv2f32( %va, %b, %c, %m, i32 %evl) ret %v @@ -573,7 +573,7 @@ define @vfma_vf_nxv2f32( %va, float %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -585,7 +585,7 @@ define @vfma_vf_nxv2f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv2f32_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -627,9 +627,9 @@ define @vfma_vv_nxv4f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu ; CHECK-NEXT: vfmadd.vv v10, v8, v12, v0.t -; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret %v = call @llvm.vp.fma.nxv4f32( %va, %b, %c, %m, i32 %evl) ret %v @@ -650,7 +650,7 @@ define @vfma_vf_nxv4f32( %va, float %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -662,7 +662,7 @@ define @vfma_vf_nxv4f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv4f32_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -704,9 
+704,9 @@ define @vfma_vv_nxv8f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu ; CHECK-NEXT: vfmadd.vv v12, v8, v16, v0.t -; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret %v = call @llvm.vp.fma.nxv8f32( %va, %b, %c, %m, i32 %evl) ret %v @@ -727,7 +727,7 @@ define @vfma_vf_nxv8f32( %va, float %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -739,7 +739,7 @@ define @vfma_vf_nxv8f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv8f32_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -782,9 +782,9 @@ ; CHECK-LABEL: vfma_vv_nxv16f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu ; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t -; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret %v = call @llvm.vp.fma.nxv16f32( %va, %b, %c, %m, i32 %evl) ret %v @@ -806,7 +806,7 @@ define @vfma_vf_nxv16f32( %va, float %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -818,7 +818,7 @@ define @vfma_vf_nxv16f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv16f32_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -860,9 +860,9 @@ define @vfma_vv_nxv1f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu ; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t -; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret %v = call @llvm.vp.fma.nxv1f64( %va, %b, %c, %m, i32 %evl) ret %v @@ -883,7 +883,7 @@ define @vfma_vf_nxv1f64( %va, double %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -895,7 +895,7 @@ define @vfma_vf_nxv1f64_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv1f64_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -937,9 +937,9 @@ define @vfma_vv_nxv2f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu ; CHECK-NEXT: vfmadd.vv v10, v8, v12, v0.t -; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret %v = call @llvm.vp.fma.nxv2f64( %va, %b, %c, %m, i32 %evl) ret %v @@ -960,7 +960,7 @@ define @vfma_vf_nxv2f64( %va, double %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -972,7 +972,7 @@ define @vfma_vf_nxv2f64_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv2f64_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -1014,9 +1014,9 @@ define @vfma_vv_nxv4f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu ; CHECK-NEXT: vfmadd.vv v12, v8, v16, v0.t -; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret %v = call @llvm.vp.fma.nxv4f64( %va, %b, %c, %m, i32 %evl) ret %v @@ -1037,7 +1037,7 @@ define @vfma_vf_nxv4f64( %va, double %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -1049,7 +1049,7 @@ define @vfma_vf_nxv4f64_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv4f64_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -1092,9 +1092,9 @@ ; CHECK-LABEL: vfma_vv_nxv7f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu ; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t -; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret %v = call @llvm.vp.fma.nxv7f64( %va, %b, %c, %m, i32 %evl) ret %v @@ -1119,9 +1119,9 @@ ; CHECK-LABEL: vfma_vv_nxv8f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu ; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t -; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret %v = call @llvm.vp.fma.nxv8f64( %va, %b, %c, %m, i32 %evl) ret %v @@ -1143,7 +1143,7 @@ define @vfma_vf_nxv8f64( %va, double %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -1155,7 +1155,7 @@ define @vfma_vf_nxv8f64_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv8f64_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret 
 %elt.head = insertelement poison, double %b, i32 0
@@ -1250,7 +1250,7 @@
 ; CHECK-NEXT: vl8re64.v v8, (a0)
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e64, m8, tu, mu
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: li a2, 24
 ; CHECK-NEXT: mul a0, a0, a2
@@ -1279,7 +1279,7 @@
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: mv a4, a1
 ; CHECK-NEXT: .LBB92_4:
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, tu, mu
 ; CHECK-NEXT: vmv1r.v v0, v1
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 5
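; A minimal illustrative sketch (not part of the patch above): the
; vfma_vv_*/vfma_vf_* CHECK lines in vfma-vp.ll come from masked
; @llvm.vp.fma calls of the shape below, which after this change are
; lowered with a "vsetvli ..., tu, mu" in front of the masked vfmadd;
; the vector.splice tests earlier in the diff are updated the same way
; for the vslideup that merges the two inputs. The function name and
; the nxv1f16 element type here are picked for illustration only.

define <vscale x 1 x half> @vfma_vv_sketch(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x half> %v
}

declare <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, i32)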