diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -1083,8 +1083,8 @@
   def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector wti.RegClass:$rs1),
                                                (vti.Mask V0), VLOpFrag)),
-            (!cast<Instruction>("PseudoVNSRL_WX_"#vti.LMul.MX#"_MASK")
-                (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, X0,
+            (!cast<Instruction>("PseudoVNSRL_WI_"#vti.LMul.MX#"_MASK")
+                (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0,
             (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
 }
diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
--- a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
@@ -44,7 +44,7 @@
 ; CHECK-V-NEXT: vmin.vx v8, v8, a1
 ; CHECK-V-NEXT: vmax.vx v8, v8, a0
 ; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-V-NEXT: vncvt.x.x.w v8, v8
+; CHECK-V-NEXT: vnsrl.wi v8, v8, 0
 ; CHECK-V-NEXT: ret
 entry:
   %conv = fptosi <2 x double> %x to <2 x i64>
@@ -83,7 +83,7 @@
 ; CHECK-V-NEXT: srli a0, a0, 32
 ; CHECK-V-NEXT: vminu.vx v8, v8, a0
 ; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-V-NEXT: vncvt.x.x.w v8, v8
+; CHECK-V-NEXT: vnsrl.wi v8, v8, 0
 ; CHECK-V-NEXT: ret
 entry:
   %conv = fptoui <2 x double> %x to <2 x i64>
@@ -131,7 +131,7 @@
 ; CHECK-V-NEXT: vmin.vx v8, v8, a0
 ; CHECK-V-NEXT: vmax.vx v8, v8, zero
 ; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-V-NEXT: vncvt.x.x.w v8, v8
+; CHECK-V-NEXT: vnsrl.wi v8, v8, 0
 ; CHECK-V-NEXT: ret
 entry:
   %conv = fptosi <2 x double> %x to <2 x i64>
@@ -210,7 +210,7 @@
 ; CHECK-V-NEXT: vmin.vx v8, v10, a1
 ; CHECK-V-NEXT: vmax.vx v10, v8, a0
 ; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-V-NEXT: vncvt.x.x.w v8, v10
+; CHECK-V-NEXT: vnsrl.wi v8, v10, 0
 ; CHECK-V-NEXT: ret
 entry:
   %conv = fptosi <4 x float> %x to <4 x i64>
@@ -268,7 +268,7 @@
 ; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-V-NEXT: vminu.vx v10, v10, a0
 ; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-V-NEXT: vncvt.x.x.w v8, v10
+; CHECK-V-NEXT: vnsrl.wi v8, v10, 0
 ; CHECK-V-NEXT: ret
 entry:
   %conv = fptoui <4 x float> %x to <4 x i64>
@@ -345,7 +345,7 @@
 ; CHECK-V-NEXT: vmin.vx v8, v10, a0
 ; CHECK-V-NEXT: vmax.vx v10, v8, zero
 ; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-V-NEXT: vncvt.x.x.w v8, v10
+; CHECK-V-NEXT: vnsrl.wi v8, v10, 0
 ; CHECK-V-NEXT: ret
 entry:
   %conv = fptosi <4 x float> %x to <4 x i64>
@@ -511,7 +511,7 @@
 ; CHECK-V-NEXT: vmin.vx v8, v10, a1
 ; CHECK-V-NEXT: vmax.vx v10, v8, a0
 ; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-V-NEXT: vncvt.x.x.w v8, v10
+; CHECK-V-NEXT: vnsrl.wi v8, v10, 0
 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -661,7 +661,7 @@
 ; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-V-NEXT: vminu.vx v10, v10, a0
 ; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-V-NEXT: vncvt.x.x.w v8, v10
+; CHECK-V-NEXT: vnsrl.wi v8, v10, 0
 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -830,7 +830,7 @@
 ; CHECK-V-NEXT: vmin.vx v8, v10, a0
 ; CHECK-V-NEXT: vmax.vx v10, v8, zero
 ; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-V-NEXT: vncvt.x.x.w v8, v10
+; CHECK-V-NEXT: vnsrl.wi v8, v10, 0
 ;
CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload @@ -890,7 +890,7 @@ ; CHECK-V-NEXT: lui a0, 1048568 ; CHECK-V-NEXT: vmax.vx v8, v8, a0 ; CHECK-V-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v8 +; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: %conv = fptosi <2 x double> %x to <2 x i32> @@ -929,7 +929,7 @@ ; CHECK-V-NEXT: addiw a0, a0, -1 ; CHECK-V-NEXT: vminu.vx v8, v9, a0 ; CHECK-V-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v8 +; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: %conv = fptoui <2 x double> %x to <2 x i32> @@ -977,7 +977,7 @@ ; CHECK-V-NEXT: vmin.vx v8, v9, a0 ; CHECK-V-NEXT: vmax.vx v8, v8, zero ; CHECK-V-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v8 +; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: %conv = fptosi <2 x double> %x to <2 x i32> @@ -1058,7 +1058,7 @@ ; CHECK-V-NEXT: lui a0, 1048568 ; CHECK-V-NEXT: vmax.vx v8, v8, a0 ; CHECK-V-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v8 +; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: %conv = fptosi <4 x float> %x to <4 x i32> @@ -1115,7 +1115,7 @@ ; CHECK-V-NEXT: addiw a0, a0, -1 ; CHECK-V-NEXT: vminu.vx v8, v8, a0 ; CHECK-V-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v8 +; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: %conv = fptoui <4 x float> %x to <4 x i32> @@ -1191,7 +1191,7 @@ ; CHECK-V-NEXT: vmin.vx v8, v8, a0 ; CHECK-V-NEXT: vmax.vx v8, v8, zero ; CHECK-V-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v8 +; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: %conv = fptosi <4 x float> %x to <4 x i32> @@ -1500,7 +1500,7 @@ ; CHECK-V-NEXT: lui a0, 1048568 ; CHECK-V-NEXT: vmax.vx v10, v8, a0 ; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v10 +; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 88(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 80(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s1, 72(sp) # 8-byte Folded Reload @@ -1774,7 +1774,7 @@ ; CHECK-V-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-V-NEXT: vminu.vx v10, v8, a0 ; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v10 +; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 88(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 80(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s1, 72(sp) # 8-byte Folded Reload @@ -2087,7 +2087,7 @@ ; CHECK-V-NEXT: vmin.vx v8, v8, a0 ; CHECK-V-NEXT: vmax.vx v10, v8, zero ; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v10 +; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 88(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 80(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s1, 72(sp) # 8-byte Folded Reload @@ -3456,7 +3456,7 @@ ; CHECK-V-NEXT: vmin.vx v8, v8, a1 ; CHECK-V-NEXT: vmax.vx v8, v8, a0 ; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v8 +; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: %conv = fptosi <2 x double> %x to <2 x i64> @@ -3493,7 +3493,7 @@ ; CHECK-V-NEXT: srli a0, a0, 32 ; CHECK-V-NEXT: vminu.vx v8, v8, a0 ; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v8 +; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: 
%conv = fptoui <2 x double> %x to <2 x i64> @@ -3540,7 +3540,7 @@ ; CHECK-V-NEXT: vmin.vx v8, v8, a0 ; CHECK-V-NEXT: vmax.vx v8, v8, zero ; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v8 +; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: %conv = fptosi <2 x double> %x to <2 x i64> @@ -3617,7 +3617,7 @@ ; CHECK-V-NEXT: vmin.vx v8, v10, a1 ; CHECK-V-NEXT: vmax.vx v10, v8, a0 ; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v10 +; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ret entry: %conv = fptosi <4 x float> %x to <4 x i64> @@ -3673,7 +3673,7 @@ ; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-V-NEXT: vminu.vx v10, v10, a0 ; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v10 +; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ret entry: %conv = fptoui <4 x float> %x to <4 x i64> @@ -3749,7 +3749,7 @@ ; CHECK-V-NEXT: vmin.vx v8, v10, a0 ; CHECK-V-NEXT: vmax.vx v10, v8, zero ; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v10 +; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ret entry: %conv = fptosi <4 x float> %x to <4 x i64> @@ -3913,7 +3913,7 @@ ; CHECK-V-NEXT: vmin.vx v8, v10, a1 ; CHECK-V-NEXT: vmax.vx v10, v8, a0 ; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v10 +; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload @@ -4061,7 +4061,7 @@ ; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-V-NEXT: vminu.vx v10, v10, a0 ; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v10 +; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload @@ -4229,7 +4229,7 @@ ; CHECK-V-NEXT: vmin.vx v8, v10, a0 ; CHECK-V-NEXT: vmax.vx v10, v8, zero ; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v10 +; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload @@ -4287,7 +4287,7 @@ ; CHECK-V-NEXT: lui a0, 1048568 ; CHECK-V-NEXT: vmax.vx v8, v8, a0 ; CHECK-V-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v8 +; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: %conv = fptosi <2 x double> %x to <2 x i32> @@ -4324,7 +4324,7 @@ ; CHECK-V-NEXT: addiw a0, a0, -1 ; CHECK-V-NEXT: vminu.vx v8, v9, a0 ; CHECK-V-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v8 +; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: %conv = fptoui <2 x double> %x to <2 x i32> @@ -4371,7 +4371,7 @@ ; CHECK-V-NEXT: vmin.vx v8, v9, a0 ; CHECK-V-NEXT: vmax.vx v8, v8, zero ; CHECK-V-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v8 +; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: %conv = fptosi <2 x double> %x to <2 x i32> @@ -4450,7 +4450,7 @@ ; CHECK-V-NEXT: lui a0, 1048568 ; CHECK-V-NEXT: vmax.vx v8, v8, a0 ; CHECK-V-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v8 +; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: %conv = fptosi <4 x float> %x to <4 x i32> @@ -4505,7 +4505,7 @@ ; 
CHECK-V-NEXT: addiw a0, a0, -1 ; CHECK-V-NEXT: vminu.vx v8, v8, a0 ; CHECK-V-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v8 +; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: %conv = fptoui <4 x float> %x to <4 x i32> @@ -4580,7 +4580,7 @@ ; CHECK-V-NEXT: vmin.vx v8, v8, a0 ; CHECK-V-NEXT: vmax.vx v8, v8, zero ; CHECK-V-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v8 +; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: %conv = fptosi <4 x float> %x to <4 x i32> @@ -4887,7 +4887,7 @@ ; CHECK-V-NEXT: lui a0, 1048568 ; CHECK-V-NEXT: vmax.vx v10, v8, a0 ; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v10 +; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 88(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 80(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s1, 72(sp) # 8-byte Folded Reload @@ -5157,7 +5157,7 @@ ; CHECK-V-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-V-NEXT: vminu.vx v10, v8, a0 ; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v10 +; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 88(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 80(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s1, 72(sp) # 8-byte Folded Reload @@ -5469,7 +5469,7 @@ ; CHECK-V-NEXT: vmin.vx v8, v8, a0 ; CHECK-V-NEXT: vmax.vx v10, v8, zero ; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-V-NEXT: vncvt.x.x.w v8, v10 +; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 88(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 80(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s1, 72(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll --- a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll +++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll @@ -21,7 +21,7 @@ ; RV32-NEXT: lui a0, 1048568 ; RV32-NEXT: vand.vx v8, v8, a0 ; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: ret ; ; RV64-LABEL: fixedlen: @@ -32,7 +32,7 @@ ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: ret %v41 = insertelement <2 x i32> poison, i32 16, i32 0 %v42 = shufflevector <2 x i32> %v41, <2 x i32> poison, <2 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll @@ -37,7 +37,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-D-NEXT: vnsrl.wi v9, v9, 23 ; CHECK-D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v9, v9 +; CHECK-D-NEXT: vnsrl.wi v9, v9, 0 ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 ; CHECK-D-NEXT: vrsub.vx v8, v9, a0 @@ -81,7 +81,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-D-NEXT: vnsrl.wi v9, v9, 23 ; CHECK-D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v9, v9 +; CHECK-D-NEXT: vnsrl.wi v9, v9, 0 ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 ; CHECK-D-NEXT: vrsub.vx v8, v9, a0 @@ -125,7 +125,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-D-NEXT: vnsrl.wi v9, v10, 23 ; CHECK-D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v9, 
v9 +; CHECK-D-NEXT: vnsrl.wi v9, v9, 0 ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 ; CHECK-D-NEXT: vrsub.vx v8, v9, a0 @@ -169,7 +169,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-D-NEXT: vnsrl.wi v10, v12, 23 ; CHECK-D-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v9, v10 +; CHECK-D-NEXT: vnsrl.wi v9, v10, 0 ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 ; CHECK-D-NEXT: vrsub.vx v8, v9, a0 @@ -213,7 +213,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-D-NEXT: vnsrl.wi v12, v16, 23 ; CHECK-D-NEXT: vsetvli zero, zero, e8, m2, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v10, v12 +; CHECK-D-NEXT: vnsrl.wi v10, v12, 0 ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 ; CHECK-D-NEXT: vrsub.vx v8, v10, a0 @@ -849,7 +849,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-D-NEXT: vsrl.vx v9, v9, a0 ; CHECK-D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v9, v9 +; CHECK-D-NEXT: vnsrl.wi v9, v9, 0 ; CHECK-D-NEXT: li a0, 1054 ; CHECK-D-NEXT: vrsub.vx v9, v9, a0 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 @@ -942,7 +942,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-D-NEXT: vsrl.vx v10, v10, a0 ; CHECK-D-NEXT: vsetvli zero, zero, e32, m1, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v9, v10 +; CHECK-D-NEXT: vnsrl.wi v9, v10, 0 ; CHECK-D-NEXT: li a0, 1054 ; CHECK-D-NEXT: vrsub.vx v9, v9, a0 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 @@ -1035,7 +1035,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-D-NEXT: vsrl.vx v12, v12, a0 ; CHECK-D-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v10, v12 +; CHECK-D-NEXT: vnsrl.wi v10, v12, 0 ; CHECK-D-NEXT: li a0, 1054 ; CHECK-D-NEXT: vrsub.vx v10, v10, a0 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 @@ -1128,7 +1128,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-D-NEXT: vsrl.vx v16, v16, a0 ; CHECK-D-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v12, v16 +; CHECK-D-NEXT: vnsrl.wi v12, v16, 0 ; CHECK-D-NEXT: li a0, 1054 ; CHECK-D-NEXT: vrsub.vx v12, v12, a0 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 @@ -1654,7 +1654,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-D-NEXT: vnsrl.wi v8, v8, 23 ; CHECK-D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v8, v8 +; CHECK-D-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret @@ -1695,7 +1695,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-D-NEXT: vnsrl.wi v8, v8, 23 ; CHECK-D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v8, v8 +; CHECK-D-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret @@ -1736,7 +1736,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-D-NEXT: vnsrl.wi v10, v8, 23 ; CHECK-D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v8, v10 +; CHECK-D-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret @@ -1777,7 +1777,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-D-NEXT: vnsrl.wi v12, v8, 23 ; CHECK-D-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v8, v12 +; CHECK-D-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret @@ -1818,7 +1818,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-D-NEXT: vnsrl.wi v16, v8, 23 ; CHECK-D-NEXT: 
vsetvli zero, zero, e8, m2, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v8, v16 +; CHECK-D-NEXT: vnsrl.wi v8, v16, 0 ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret @@ -2428,7 +2428,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-D-NEXT: vsrl.vx v8, v9, a0 ; CHECK-D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v8, v8 +; CHECK-D-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-D-NEXT: li a0, 1054 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret @@ -2517,7 +2517,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-D-NEXT: vsrl.vx v8, v10, a0 ; CHECK-D-NEXT: vsetvli zero, zero, e32, m1, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v10, v8 +; CHECK-D-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-D-NEXT: li a0, 1054 ; CHECK-D-NEXT: vrsub.vx v8, v10, a0 ; CHECK-D-NEXT: ret @@ -2606,7 +2606,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-D-NEXT: vsrl.vx v8, v12, a0 ; CHECK-D-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v12, v8 +; CHECK-D-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-D-NEXT: li a0, 1054 ; CHECK-D-NEXT: vrsub.vx v8, v12, a0 ; CHECK-D-NEXT: ret @@ -2695,7 +2695,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-D-NEXT: vsrl.vx v8, v16, a0 ; CHECK-D-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v16, v8 +; CHECK-D-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-D-NEXT: li a0, 1054 ; CHECK-D-NEXT: vrsub.vx v8, v16, a0 ; CHECK-D-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll @@ -37,7 +37,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-D-NEXT: vnsrl.wi v9, v9, 23 ; CHECK-D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v9, v9 +; CHECK-D-NEXT: vnsrl.wi v9, v9, 0 ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 ; CHECK-D-NEXT: vsub.vx v8, v9, a0 @@ -81,7 +81,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-D-NEXT: vnsrl.wi v9, v9, 23 ; CHECK-D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v9, v9 +; CHECK-D-NEXT: vnsrl.wi v9, v9, 0 ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 ; CHECK-D-NEXT: vsub.vx v8, v9, a0 @@ -125,7 +125,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-D-NEXT: vnsrl.wi v9, v10, 23 ; CHECK-D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v9, v9 +; CHECK-D-NEXT: vnsrl.wi v9, v9, 0 ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 ; CHECK-D-NEXT: vsub.vx v8, v9, a0 @@ -169,7 +169,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-D-NEXT: vnsrl.wi v10, v12, 23 ; CHECK-D-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v9, v10 +; CHECK-D-NEXT: vnsrl.wi v9, v10, 0 ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 ; CHECK-D-NEXT: vsub.vx v8, v9, a0 @@ -213,7 +213,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-D-NEXT: vnsrl.wi v12, v16, 23 ; CHECK-D-NEXT: vsetvli zero, zero, e8, m2, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v10, v12 +; CHECK-D-NEXT: vnsrl.wi v10, v12, 0 ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 ; CHECK-D-NEXT: vsub.vx v8, v10, a0 @@ -781,7 +781,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-D-NEXT: vsrl.vx v9, v10, a0 ; CHECK-D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v9, v9 +; CHECK-D-NEXT: vnsrl.wi v9, 
v9, 0 ; CHECK-D-NEXT: li a0, 1023 ; CHECK-D-NEXT: vsub.vx v9, v9, a0 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 @@ -862,7 +862,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-D-NEXT: vsrl.vx v10, v10, a0 ; CHECK-D-NEXT: vsetvli zero, zero, e32, m1, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v9, v10 +; CHECK-D-NEXT: vnsrl.wi v9, v10, 0 ; CHECK-D-NEXT: li a0, 1023 ; CHECK-D-NEXT: vsub.vx v9, v9, a0 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 @@ -943,7 +943,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-D-NEXT: vsrl.vx v12, v12, a0 ; CHECK-D-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v10, v12 +; CHECK-D-NEXT: vnsrl.wi v10, v12, 0 ; CHECK-D-NEXT: li a0, 1023 ; CHECK-D-NEXT: vsub.vx v10, v10, a0 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 @@ -1024,7 +1024,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-D-NEXT: vsrl.vx v16, v16, a0 ; CHECK-D-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v12, v16 +; CHECK-D-NEXT: vnsrl.wi v12, v16, 0 ; CHECK-D-NEXT: li a0, 1023 ; CHECK-D-NEXT: vsub.vx v12, v12, a0 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 @@ -1456,7 +1456,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-D-NEXT: vnsrl.wi v8, v8, 23 ; CHECK-D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v8, v8 +; CHECK-D-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret @@ -1497,7 +1497,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-D-NEXT: vnsrl.wi v8, v8, 23 ; CHECK-D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v8, v8 +; CHECK-D-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret @@ -1538,7 +1538,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-D-NEXT: vnsrl.wi v10, v8, 23 ; CHECK-D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v8, v10 +; CHECK-D-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret @@ -1579,7 +1579,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-D-NEXT: vnsrl.wi v12, v8, 23 ; CHECK-D-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v8, v12 +; CHECK-D-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret @@ -1620,7 +1620,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-D-NEXT: vnsrl.wi v16, v8, 23 ; CHECK-D-NEXT: vsetvli zero, zero, e8, m2, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v8, v16 +; CHECK-D-NEXT: vnsrl.wi v8, v16, 0 ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret @@ -2162,7 +2162,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-D-NEXT: vsrl.vx v8, v9, a0 ; CHECK-D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v8, v8 +; CHECK-D-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-D-NEXT: li a0, 1023 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 ; CHECK-D-NEXT: ret @@ -2239,7 +2239,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-D-NEXT: vsrl.vx v8, v10, a0 ; CHECK-D-NEXT: vsetvli zero, zero, e32, m1, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v10, v8 +; CHECK-D-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-D-NEXT: li a0, 1023 ; CHECK-D-NEXT: vsub.vx v8, v10, a0 ; CHECK-D-NEXT: ret @@ -2316,7 +2316,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-D-NEXT: vsrl.vx v8, v12, a0 ; CHECK-D-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v12, v8 +; CHECK-D-NEXT: 
vnsrl.wi v12, v8, 0 ; CHECK-D-NEXT: li a0, 1023 ; CHECK-D-NEXT: vsub.vx v8, v12, a0 ; CHECK-D-NEXT: ret @@ -2393,7 +2393,7 @@ ; CHECK-D-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-D-NEXT: vsrl.vx v8, v16, a0 ; CHECK-D-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; CHECK-D-NEXT: vncvt.x.x.w v16, v8 +; CHECK-D-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-D-NEXT: li a0, 1023 ; CHECK-D-NEXT: vsub.vx v8, v16, a0 ; CHECK-D-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll @@ -392,7 +392,7 @@ ; CHECK-LABEL: truncstore_nxv1i16_nxv1i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -452,7 +452,7 @@ ; CHECK-LABEL: truncstore_nxv2i16_nxv2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -512,7 +512,7 @@ ; CHECK-LABEL: truncstore_nxv4i16_nxv4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -572,7 +572,7 @@ ; CHECK-LABEL: truncstore_nxv8i16_nxv8i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu -; CHECK-NEXT: vncvt.x.x.w v10, v8 +; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vs1r.v v10, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -632,7 +632,7 @@ ; CHECK-LABEL: truncstore_nxv16i16_nxv16i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v12, v8 +; CHECK-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-NEXT: vs2r.v v12, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -668,7 +668,7 @@ ; CHECK-LABEL: truncstore_nxv32i16_nxv32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v16, v8 +; CHECK-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-NEXT: vs4r.v v16, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -680,9 +680,9 @@ ; CHECK-LABEL: truncstore_nxv1i32_nxv1i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -694,7 +694,7 @@ ; CHECK-LABEL: truncstore_nxv1i32_nxv1i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -730,9 +730,9 @@ ; CHECK-LABEL: truncstore_nxv2i32_nxv2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -744,7 +744,7 @@ ; CHECK-LABEL: truncstore_nxv2i32_nxv2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -780,9 +780,9 @@ ; CHECK-LABEL: 
truncstore_nxv4i32_nxv4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu -; CHECK-NEXT: vncvt.x.x.w v10, v8 +; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -794,7 +794,7 @@ ; CHECK-LABEL: truncstore_nxv4i32_nxv4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu -; CHECK-NEXT: vncvt.x.x.w v10, v8 +; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vs1r.v v10, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -830,9 +830,9 @@ ; CHECK-LABEL: truncstore_nxv8i32_nxv8i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v12, v8 +; CHECK-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v12 +; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: vs1r.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -844,7 +844,7 @@ ; CHECK-LABEL: truncstore_nxv8i32_nxv8i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v12, v8 +; CHECK-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-NEXT: vs2r.v v12, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -880,9 +880,9 @@ ; CHECK-LABEL: truncstore_nxv16i32_nxv16i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v16, v8 +; CHECK-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v16 +; CHECK-NEXT: vnsrl.wi v8, v16, 0 ; CHECK-NEXT: vs2r.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -894,7 +894,7 @@ ; CHECK-LABEL: truncstore_nxv16i32_nxv16i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v16, v8 +; CHECK-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-NEXT: vs4r.v v16, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -906,11 +906,11 @@ ; CHECK-LABEL: truncstore_nxv1i64_nxv1i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -922,9 +922,9 @@ ; CHECK-LABEL: truncstore_nxv1i64_nxv1i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -936,7 +936,7 @@ ; CHECK-LABEL: truncstore_nxv1i64_nxv1i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -948,11 +948,11 @@ ; CHECK-LABEL: truncstore_nxv2i64_nxv2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; CHECK-NEXT: vncvt.x.x.w v10, v8 +; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = 
trunc %x to @@ -964,9 +964,9 @@ ; CHECK-LABEL: truncstore_nxv2i64_nxv2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; CHECK-NEXT: vncvt.x.x.w v10, v8 +; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -978,7 +978,7 @@ ; CHECK-LABEL: truncstore_nxv2i64_nxv2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; CHECK-NEXT: vncvt.x.x.w v10, v8 +; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vs1r.v v10, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -990,11 +990,11 @@ ; CHECK-LABEL: truncstore_nxv4i64_nxv4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v12, v8 +; CHECK-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v12 +; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -1006,9 +1006,9 @@ ; CHECK-LABEL: truncstore_nxv4i64_nxv4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v12, v8 +; CHECK-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v12 +; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: vs1r.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -1020,7 +1020,7 @@ ; CHECK-LABEL: truncstore_nxv4i64_nxv4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v12, v8 +; CHECK-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-NEXT: vs2r.v v12, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -1032,11 +1032,11 @@ ; CHECK-LABEL: truncstore_nxv8i64_nxv8i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v16, v8 +; CHECK-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v16 +; CHECK-NEXT: vnsrl.wi v8, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; CHECK-NEXT: vncvt.x.x.w v10, v8 +; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vs1r.v v10, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -1048,9 +1048,9 @@ ; CHECK-LABEL: truncstore_nxv8i64_nxv8i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v16, v8 +; CHECK-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v16 +; CHECK-NEXT: vnsrl.wi v8, v16, 0 ; CHECK-NEXT: vs2r.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -1062,7 +1062,7 @@ ; CHECK-LABEL: truncstore_nxv8i64_nxv8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v16, v8 +; CHECK-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-NEXT: vs4r.v v16, (a0) ; CHECK-NEXT: ret %y = trunc %x to diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll @@ -8,7 +8,7 @@ ; CHECK-LABEL: vtrunc_nxv2i7_nxv2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t +; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret %v = call <2 x i7> @llvm.vp.trunc.nxv2i7.nxv2i16(<2 x i16> %a, <2 x i1> %m, i32 %vl) ret <2 x i7> %v @@ -20,7 +20,7 @@ 
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i15: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t +; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i15(<2 x i15> %a, <2 x i1> %m, i32 %vl) ret <2 x i8> %v @@ -32,7 +32,7 @@ ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t +; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i16(<2 x i16> %a, <2 x i1> %m, i32 %vl) ret <2 x i8> %v @@ -42,7 +42,7 @@ ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i16(<2 x i16> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) ret <2 x i8> %v @@ -71,7 +71,7 @@ ; CHECK-NEXT: .LBB4_2: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vncvt.x.x.w v8, v16, v0.t +; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t ; CHECK-NEXT: bltu a0, a1, .LBB4_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 64 @@ -80,7 +80,7 @@ ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vncvt.x.x.w v16, v24, v0.t +; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t ; CHECK-NEXT: li a0, 128 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu ; CHECK-NEXT: vslideup.vx v16, v8, a1 @@ -100,9 +100,9 @@ ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t +; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t +; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl) ret <2 x i8> %v @@ -112,9 +112,9 @@ ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i32(<2 x i32> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) ret <2 x i8> %v @@ -126,11 +126,11 @@ ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t +; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t +; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t +; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl) ret <2 x i8> %v @@ -140,11 +140,11 @@ ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; 
CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i64(<2 x i64> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) ret <2 x i8> %v @@ -156,7 +156,7 @@ ; CHECK-LABEL: vtrunc_nxv2i16_nxv2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t +; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl) ret <2 x i16> %v @@ -166,7 +166,7 @@ ; CHECK-LABEL: vtrunc_nxv2i16_nxv2i32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i32(<2 x i32> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) ret <2 x i16> %v @@ -178,9 +178,9 @@ ; CHECK-LABEL: vtrunc_nxv2i16_nxv2i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t +; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t +; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl) ret <2 x i16> %v @@ -190,9 +190,9 @@ ; CHECK-LABEL: vtrunc_nxv2i16_nxv2i64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i64(<2 x i64> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) ret <2 x i16> %v @@ -204,9 +204,9 @@ ; CHECK-LABEL: vtrunc_nxv15i16_nxv15i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v16, v8, v0.t +; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v16, v0.t +; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t ; CHECK-NEXT: ret %v = call <15 x i16> @llvm.vp.trunc.nxv15i16.nxv15i64(<15 x i64> %a, <15 x i1> %m, i32 %vl) ret <15 x i16> %v @@ -218,7 +218,7 @@ ; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t +; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<2 x i64> %a, <2 x i1> %m, i32 %vl) ret <2 x i32> %v @@ -228,7 +228,7 @@ ; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<2 x i64> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) ret <2 x i32> %v @@ -289,7 +289,7 @@ ; CHECK-NEXT: vle64.v v8, (a6) ; CHECK-NEXT: vsetvli zero, a3, e32, m4, ta, mu ; CHECK-NEXT: li a3, 16 -; CHECK-NEXT: vncvt.x.x.w v24, v16, 
v0.t +; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t ; CHECK-NEXT: csrr a6, vlenb ; CHECK-NEXT: slli a6, a6, 4 ; CHECK-NEXT: add a6, sp, a6 @@ -302,7 +302,7 @@ ; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, mu ; CHECK-NEXT: li a5, 64 ; CHECK-NEXT: vmv1r.v v0, v3 -; CHECK-NEXT: vncvt.x.x.w v16, v8, v0.t +; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t ; CHECK-NEXT: csrr a6, vlenb ; CHECK-NEXT: li t0, 48 ; CHECK-NEXT: mul a6, a6, t0 @@ -335,7 +335,7 @@ ; CHECK-NEXT: vle64.v v8, (a1) ; CHECK-NEXT: li a5, 32 ; CHECK-NEXT: vsetvli zero, t0, e32, m4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v24, v16, v0.t +; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t ; CHECK-NEXT: csrr t0, vlenb ; CHECK-NEXT: slli t0, t0, 3 ; CHECK-NEXT: add t0, sp, t0 @@ -348,7 +348,7 @@ ; CHECK-NEXT: addi t0, a1, 384 ; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v0, v3 -; CHECK-NEXT: vncvt.x.x.w v16, v8, v0.t +; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t ; CHECK-NEXT: csrr a6, vlenb ; CHECK-NEXT: li t1, 40 ; CHECK-NEXT: mul a6, a6, t1 @@ -372,7 +372,7 @@ ; CHECK-NEXT: .LBB16_20: ; CHECK-NEXT: vle64.v v8, (a6) ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v16, v24, v0.t +; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: bltu a4, a3, .LBB16_22 @@ -381,7 +381,7 @@ ; CHECK-NEXT: .LBB16_22: ; CHECK-NEXT: vsetvli zero, a4, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v0, v2 -; CHECK-NEXT: vncvt.x.x.w v24, v8, v0.t +; CHECK-NEXT: vnsrl.wi v24, v8, 0, v0.t ; CHECK-NEXT: bltu a7, a5, .LBB16_24 ; CHECK-NEXT: # %bb.23: ; CHECK-NEXT: li a7, 32 @@ -440,7 +440,7 @@ ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: vncvt.x.x.w v16, v8, v0.t +; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: add a1, sp, a1 @@ -457,7 +457,7 @@ ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload -; CHECK-NEXT: vncvt.x.x.w v16, v8, v0.t +; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t ; CHECK-NEXT: vsetvli zero, a5, e32, m8, tu, mu ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 4 @@ -517,7 +517,7 @@ ; CHECK-NEXT: .LBB17_2: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: vncvt.x.x.w v8, v16, v0.t +; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t ; CHECK-NEXT: bltu a0, a1, .LBB17_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 16 @@ -526,7 +526,7 @@ ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload -; CHECK-NEXT: vncvt.x.x.w v16, v24, v0.t +; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t ; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vi v16, v8, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll @@ -46,7 +46,7 @@ ; LMULMAX8-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; LMULMAX8-NEXT: vnsrl.wi v10, v12, 23 ; LMULMAX8-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; LMULMAX8-NEXT: vncvt.x.x.w v9, v10 +; LMULMAX8-NEXT: vnsrl.wi v9, v10, 0 ; LMULMAX8-NEXT: li a1, 134 ; LMULMAX8-NEXT: vmseq.vi v0, v8, 0 ; LMULMAX8-NEXT: vrsub.vx v8, v9, a1 @@ -845,7 +845,7 @@ ; LMULMAX8-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; LMULMAX8-NEXT: vnsrl.wi v12, 
v16, 23 ; LMULMAX8-NEXT: vsetvli zero, zero, e8, m2, ta, mu -; LMULMAX8-NEXT: vncvt.x.x.w v10, v12 +; LMULMAX8-NEXT: vnsrl.wi v10, v12, 0 ; LMULMAX8-NEXT: li a1, 134 ; LMULMAX8-NEXT: vmseq.vi v0, v8, 0 ; LMULMAX8-NEXT: vrsub.vx v8, v10, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll @@ -46,7 +46,7 @@ ; LMULMAX8-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; LMULMAX8-NEXT: vnsrl.wi v10, v12, 23 ; LMULMAX8-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; LMULMAX8-NEXT: vncvt.x.x.w v9, v10 +; LMULMAX8-NEXT: vnsrl.wi v9, v10, 0 ; LMULMAX8-NEXT: li a1, 127 ; LMULMAX8-NEXT: vmseq.vi v0, v8, 0 ; LMULMAX8-NEXT: vsub.vx v8, v9, a1 @@ -742,7 +742,7 @@ ; LMULMAX8-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; LMULMAX8-NEXT: vnsrl.wi v12, v16, 23 ; LMULMAX8-NEXT: vsetvli zero, zero, e8, m2, ta, mu -; LMULMAX8-NEXT: vncvt.x.x.w v10, v12 +; LMULMAX8-NEXT: vnsrl.wi v10, v12, 0 ; LMULMAX8-NEXT: li a1, 127 ; LMULMAX8-NEXT: vmseq.vi v0, v8, 0 ; LMULMAX8-NEXT: vsub.vx v8, v10, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll @@ -550,7 +550,7 @@ ; CHECK-LABEL: truncstore_v2i16_v2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc <2 x i16> %x to <2 x i8> @@ -610,7 +610,7 @@ ; CHECK-LABEL: truncstore_v4i16_v4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc <4 x i16> %x to <4 x i8> @@ -692,7 +692,7 @@ ; CHECK-LABEL: truncstore_v8i16_v8i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc <8 x i16> %x to <8 x i8> @@ -812,8 +812,8 @@ ; LMULMAX1-LABEL: truncstore_v16i16_v16i8: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 8 ; LMULMAX1-NEXT: vse8.v v8, (a0) @@ -822,7 +822,7 @@ ; LMULMAX4-LABEL: truncstore_v16i16_v16i8: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v10, v8 +; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0 ; LMULMAX4-NEXT: vse8.v v10, (a0) ; LMULMAX4-NEXT: ret %y = trunc <16 x i16> %x to <16 x i8> @@ -994,9 +994,9 @@ ; CHECK-LABEL: truncstore_v2i32_v2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc <2 x i32> %x to <2 x i8> @@ -1008,7 +1008,7 @@ ; CHECK-LABEL: truncstore_v2i32_v2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, 
v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret %y = trunc <2 x i32> %x to <2 x i16> @@ -1044,9 +1044,9 @@ ; CHECK-LABEL: truncstore_v4i32_v4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc <4 x i32> %x to <4 x i8> @@ -1058,7 +1058,7 @@ ; CHECK-LABEL: truncstore_v4i32_v4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret %y = trunc <4 x i32> %x to <4 x i16> @@ -1116,13 +1116,13 @@ ; LMULMAX1-LABEL: truncstore_v8i32_v8i8: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vse8.v v8, (a0) @@ -1131,9 +1131,9 @@ ; LMULMAX4-LABEL: truncstore_v8i32_v8i8: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 8, e16, m1, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v10, v8 +; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0 ; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v8, v10 +; LMULMAX4-NEXT: vnsrl.wi v8, v10, 0 ; LMULMAX4-NEXT: vse8.v v8, (a0) ; LMULMAX4-NEXT: ret %y = trunc <8 x i32> %x to <8 x i8> @@ -1145,8 +1145,8 @@ ; LMULMAX1-LABEL: truncstore_v8i32_v8i16: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vse16.v v8, (a0) @@ -1155,7 +1155,7 @@ ; LMULMAX4-LABEL: truncstore_v8i32_v8i16: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 8, e16, m1, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v10, v8 +; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0 ; LMULMAX4-NEXT: vse16.v v10, (a0) ; LMULMAX4-NEXT: ret %y = trunc <8 x i32> %x to <8 x i16> @@ -1227,25 +1227,25 @@ ; LMULMAX1-LABEL: truncstore_v16i32_v16i8: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 +; LMULMAX1-NEXT: vnsrl.wi v9, v10, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: 
vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 8 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 +; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 12 ; LMULMAX1-NEXT: vse8.v v8, (a0) @@ -1254,9 +1254,9 @@ ; LMULMAX4-LABEL: truncstore_v16i32_v16i8: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v12, v8 +; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0 ; LMULMAX4-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v8, v12 +; LMULMAX4-NEXT: vnsrl.wi v8, v12, 0 ; LMULMAX4-NEXT: vse8.v v8, (a0) ; LMULMAX4-NEXT: ret %y = trunc <16 x i32> %x to <16 x i8> @@ -1268,13 +1268,13 @@ ; LMULMAX1-LABEL: truncstore_v16i32_v16i16: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 -; LMULMAX1-NEXT: vncvt.x.x.w v10, v10 +; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0 +; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 ; LMULMAX1-NEXT: addi a1, a0, 16 @@ -1285,7 +1285,7 @@ ; LMULMAX4-LABEL: truncstore_v16i32_v16i16: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v12, v8 +; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0 ; LMULMAX4-NEXT: vse16.v v12, (a0) ; LMULMAX4-NEXT: ret %y = trunc <16 x i32> %x to <16 x i16> @@ -1393,11 +1393,11 @@ ; CHECK-LABEL: truncstore_v2i64_v2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc <2 x i64> %x to <2 x i8> @@ -1409,9 +1409,9 @@ ; CHECK-LABEL: truncstore_v2i64_v2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret %y = trunc <2 x i64> %x to <2 x i16> @@ -1423,7 +1423,7 @@ ; CHECK-LABEL: truncstore_v2i64_v2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret %y = trunc <2 x i64> %x to <2 x i32> @@ -1435,17 +1435,17 @@ ; LMULMAX1-LABEL: truncstore_v4i64_v4i8: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: 
vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vse8.v v8, (a0) @@ -1454,11 +1454,11 @@ ; LMULMAX4-LABEL: truncstore_v4i64_v4i8: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v10, v8 +; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0 ; LMULMAX4-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v8, v10 +; LMULMAX4-NEXT: vnsrl.wi v8, v10, 0 ; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX4-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX4-NEXT: vse8.v v8, (a0) ; LMULMAX4-NEXT: ret %y = trunc <4 x i64> %x to <4 x i8> @@ -1470,13 +1470,13 @@ ; LMULMAX1-LABEL: truncstore_v4i64_v4i16: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vse16.v v8, (a0) @@ -1485,9 +1485,9 @@ ; LMULMAX4-LABEL: truncstore_v4i64_v4i16: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v10, v8 +; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0 ; LMULMAX4-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v8, v10 +; LMULMAX4-NEXT: vnsrl.wi v8, v10, 0 ; LMULMAX4-NEXT: vse16.v v8, (a0) ; LMULMAX4-NEXT: ret %y = trunc <4 x i64> %x to <4 x i16> @@ -1499,8 +1499,8 @@ ; LMULMAX1-LABEL: truncstore_v4i64_v4i32: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vse32.v v8, (a0) @@ -1509,7 +1509,7 @@ ; LMULMAX4-LABEL: truncstore_v4i64_v4i32: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v10, v8 +; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0 ; LMULMAX4-NEXT: vse32.v v10, (a0) ; LMULMAX4-NEXT: ret %y = trunc <4 x i64> %x to <4 x i32> @@ -1521,33 +1521,33 @@ ; LMULMAX1-LABEL: truncstore_v8i64_v8i8: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; 
LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 +; LMULMAX1-NEXT: vnsrl.wi v9, v10, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 +; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 ; LMULMAX1-NEXT: vse8.v v8, (a0) @@ -1556,11 +1556,11 @@ ; LMULMAX4-LABEL: truncstore_v8i64_v8i8: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v12, v8 +; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0 ; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v8, v12 +; LMULMAX4-NEXT: vnsrl.wi v8, v12, 0 ; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX4-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX4-NEXT: vse8.v v8, (a0) ; LMULMAX4-NEXT: ret %y = trunc <8 x i64> %x to <8 x i8> @@ -1572,25 +1572,25 @@ ; LMULMAX1-LABEL: truncstore_v8i64_v8i16: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 +; LMULMAX1-NEXT: vnsrl.wi v9, v10, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 +; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 ; LMULMAX1-NEXT: vse16.v v8, (a0) @@ -1599,9 +1599,9 @@ ; LMULMAX4-LABEL: truncstore_v8i64_v8i16: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v12, v8 +; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0 ; 
LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v8, v12 +; LMULMAX4-NEXT: vnsrl.wi v8, v12, 0 ; LMULMAX4-NEXT: vse16.v v8, (a0) ; LMULMAX4-NEXT: ret %y = trunc <8 x i64> %x to <8 x i16> @@ -1613,13 +1613,13 @@ ; LMULMAX1-LABEL: truncstore_v8i64_v8i32: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 -; LMULMAX1-NEXT: vncvt.x.x.w v10, v10 +; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0 +; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 2 ; LMULMAX1-NEXT: addi a1, a0, 16 @@ -1630,7 +1630,7 @@ ; LMULMAX4-LABEL: truncstore_v8i64_v8i32: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v12, v8 +; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0 ; LMULMAX4-NEXT: vse32.v v12, (a0) ; LMULMAX4-NEXT: ret %y = trunc <8 x i64> %x to <8 x i32> @@ -1642,65 +1642,65 @@ ; LMULMAX1-LABEL: truncstore_v16i64_v16i8: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 4, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 +; LMULMAX1-NEXT: vnsrl.wi v9, v10, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 6, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 +; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v12 +; LMULMAX1-NEXT: vnsrl.wi v9, v12, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 10, e8, m1, tu, mu ; 
LMULMAX1-NEXT: vslideup.vi v8, v9, 8 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v13 +; LMULMAX1-NEXT: vnsrl.wi v9, v13, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 10 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v14 +; LMULMAX1-NEXT: vnsrl.wi v9, v14, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 14, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 12 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v15 +; LMULMAX1-NEXT: vnsrl.wi v9, v15, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 14 ; LMULMAX1-NEXT: vse8.v v8, (a0) @@ -1709,17 +1709,17 @@ ; LMULMAX4-LABEL: truncstore_v16i64_v16i8: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v16, v12 +; LMULMAX4-NEXT: vnsrl.wi v16, v12, 0 ; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v12, v16 +; LMULMAX4-NEXT: vnsrl.wi v12, v16, 0 ; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v12, v12 +; LMULMAX4-NEXT: vnsrl.wi v12, v12, 0 ; LMULMAX4-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v14, v8 +; LMULMAX4-NEXT: vnsrl.wi v14, v8, 0 ; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v8, v14 +; LMULMAX4-NEXT: vnsrl.wi v8, v14, 0 ; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX4-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, tu, mu ; LMULMAX4-NEXT: vslideup.vi v8, v12, 8 ; LMULMAX4-NEXT: vse8.v v8, (a0) @@ -1733,47 +1733,47 @@ ; LMULMAX1-LABEL: truncstore_v16i64_v16i16: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 +; LMULMAX1-NEXT: vnsrl.wi v9, v10, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, 
mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 +; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v13 +; LMULMAX1-NEXT: vnsrl.wi v9, v13, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v10, v12 +; LMULMAX1-NEXT: vnsrl.wi v10, v12, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v10, v10 +; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v14 +; LMULMAX1-NEXT: vnsrl.wi v9, v14, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v15 +; LMULMAX1-NEXT: vnsrl.wi v9, v15, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 6 ; LMULMAX1-NEXT: addi a1, a0, 16 @@ -1784,13 +1784,13 @@ ; LMULMAX4-LABEL: truncstore_v16i64_v16i16: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v16, v12 +; LMULMAX4-NEXT: vnsrl.wi v16, v12, 0 ; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v12, v16 +; LMULMAX4-NEXT: vnsrl.wi v12, v16, 0 ; LMULMAX4-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v14, v8 +; LMULMAX4-NEXT: vnsrl.wi v14, v8, 0 ; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v8, v14 +; LMULMAX4-NEXT: vnsrl.wi v8, v14, 0 ; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, tu, mu ; LMULMAX4-NEXT: vslideup.vi v8, v12, 8 ; LMULMAX4-NEXT: vse16.v v8, (a0) @@ -1804,23 +1804,23 @@ ; LMULMAX1-LABEL: truncstore_v16i64_v16i32: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 -; LMULMAX1-NEXT: vncvt.x.x.w v10, v10 +; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0 +; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v13 -; LMULMAX1-NEXT: vncvt.x.x.w v11, v12 +; LMULMAX1-NEXT: vnsrl.wi v9, v13, 0 +; LMULMAX1-NEXT: vnsrl.wi v11, v12, 0 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v11, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v15 -; LMULMAX1-NEXT: vncvt.x.x.w v12, v14 +; LMULMAX1-NEXT: vnsrl.wi v9, v15, 0 +; LMULMAX1-NEXT: vnsrl.wi v12, v14, 
0 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v12, v9, 2 ; LMULMAX1-NEXT: addi a1, a0, 48 @@ -1835,8 +1835,8 @@ ; LMULMAX4-LABEL: truncstore_v16i64_v16i32: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; LMULMAX4-NEXT: vncvt.x.x.w v16, v12 -; LMULMAX4-NEXT: vncvt.x.x.w v12, v8 +; LMULMAX4-NEXT: vnsrl.wi v16, v12, 0 +; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0 ; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, tu, mu ; LMULMAX4-NEXT: vslideup.vi v12, v16, 8 ; LMULMAX4-NEXT: vse32.v v12, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll @@ -369,9 +369,9 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v9 +; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x @@ -387,9 +387,9 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v9 +; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x @@ -429,9 +429,9 @@ ; LMULMAX8-NEXT: vle64.v v8, (a0) ; LMULMAX8-NEXT: vfncvt.rtz.x.f.w v12, v8 ; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; LMULMAX8-NEXT: vncvt.x.x.w v8, v12 +; LMULMAX8-NEXT: vnsrl.wi v8, v12, 0 ; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; LMULMAX8-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX8-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX8-NEXT: vse8.v v8, (a1) ; LMULMAX8-NEXT: ret ; @@ -447,31 +447,31 @@ ; LMULMAX1-NEXT: vle64.v v11, (a0) ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v12, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v12 +; LMULMAX1-NEXT: vnsrl.wi v9, v12, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v12, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v11, v12 +; LMULMAX1-NEXT: vnsrl.wi v11, v12, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v11, v11 +; LMULMAX1-NEXT: vnsrl.wi v11, v11, 0 ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v9, v11, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v11, v10 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v10, v11 +; LMULMAX1-NEXT: vnsrl.wi v10, v11, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v10, v10 +; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0 ; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v9, v10, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v10, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v10 +; LMULMAX1-NEXT: vnsrl.wi v8, v10, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; 
LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v9, v8, 6 ; LMULMAX1-NEXT: vse8.v v9, (a1) @@ -489,9 +489,9 @@ ; LMULMAX8-NEXT: vle64.v v8, (a0) ; LMULMAX8-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; LMULMAX8-NEXT: vncvt.x.x.w v8, v12 +; LMULMAX8-NEXT: vnsrl.wi v8, v12, 0 ; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; LMULMAX8-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX8-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX8-NEXT: vse8.v v8, (a1) ; LMULMAX8-NEXT: ret ; @@ -507,31 +507,31 @@ ; LMULMAX1-NEXT: vle64.v v11, (a0) ; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v12, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v12 +; LMULMAX1-NEXT: vnsrl.wi v9, v12, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v12, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v11, v12 +; LMULMAX1-NEXT: vnsrl.wi v11, v12, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v11, v11 +; LMULMAX1-NEXT: vnsrl.wi v11, v11, 0 ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v9, v11, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v11, v10 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v10, v11 +; LMULMAX1-NEXT: vnsrl.wi v10, v11, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v10, v10 +; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0 ; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v9, v10, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v10 +; LMULMAX1-NEXT: vnsrl.wi v8, v10, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v9, v8, 6 ; LMULMAX1-NEXT: vse8.v v9, (a1) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll @@ -121,7 +121,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v9, v0.t +; CHECK-NEXT: vnsrl.wi v8, v9, 0, v0.t ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl) ret <4 x i8> %v @@ -133,7 +133,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v9 +; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) ret <4 x i8> %v @@ -217,9 +217,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, 
e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t +; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t +; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) ret <4 x i8> %v @@ -231,9 +231,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) ret <4 x i8> %v @@ -247,7 +247,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t +; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) ret <4 x i16> %v @@ -259,7 +259,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) ret <4 x i16> %v diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll @@ -121,7 +121,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v9, v0.t +; CHECK-NEXT: vnsrl.wi v8, v9, 0, v0.t ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl) ret <4 x i8> %v @@ -133,7 +133,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v9 +; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) ret <4 x i8> %v @@ -217,9 +217,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t +; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t +; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) ret <4 x i8> %v @@ -231,9 +231,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vnsrl.wi v8, 
v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) ret <4 x i8> %v @@ -247,7 +247,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t +; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.fptoui.v4i16.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) ret <4 x i16> %v @@ -259,7 +259,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.fptoui.v4i16.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) ret <4 x i16> %v diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll @@ -163,9 +163,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x @@ -179,9 +179,9 @@ ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX8-NEXT: vle32.v v8, (a0) -; LMULMAX8-NEXT: vncvt.x.x.w v10, v8 +; LMULMAX8-NEXT: vnsrl.wi v10, v8, 0 ; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; LMULMAX8-NEXT: vncvt.x.x.w v8, v10 +; LMULMAX8-NEXT: vnsrl.wi v8, v10, 0 ; LMULMAX8-NEXT: vse8.v v8, (a1) ; LMULMAX8-NEXT: ret ; @@ -189,9 +189,9 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX2-NEXT: vle32.v v8, (a0) -; LMULMAX2-NEXT: vncvt.x.x.w v10, v8 +; LMULMAX2-NEXT: vnsrl.wi v10, v8, 0 ; LMULMAX2-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; LMULMAX2-NEXT: vncvt.x.x.w v8, v10 +; LMULMAX2-NEXT: vnsrl.wi v8, v10, 0 ; LMULMAX2-NEXT: vse8.v v8, (a1) ; LMULMAX2-NEXT: ret ; @@ -201,13 +201,13 @@ ; LMULMAX1-NEXT: vle32.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle32.v v9, (a0) -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 +; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vse8.v v8, (a1) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll @@ -4595,7 +4595,7 @@ ; RV32V-NEXT: vsext.vf8 v16, v8 ; RV32V-NEXT: vsll.vi v8, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v8 +; RV32V-NEXT: vnsrl.wi v16, v8, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vluxei32.v v12, (a0), v16, v0.t ; RV32V-NEXT: vmv.v.v v8, v12 @@ -4877,7 +4877,7 @@ ; RV32V-NEXT: vzext.vf8 v16, v8 ; RV32V-NEXT: vsll.vi v8, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v8 +; RV32V-NEXT: vnsrl.wi v16, v8, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vluxei32.v v12, (a0), v16, v0.t ; RV32V-NEXT: vmv.v.v v8, v12 @@ -5447,7 +5447,7 @@ ; RV32V-NEXT: vsext.vf4 v16, v8 ; RV32V-NEXT: vsll.vi v8, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v8 +; RV32V-NEXT: vnsrl.wi v16, v8, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vluxei32.v v12, (a0), v16, v0.t ; RV32V-NEXT: vmv.v.v v8, v12 @@ -5730,7 +5730,7 @@ ; RV32V-NEXT: vzext.vf4 v16, v8 ; RV32V-NEXT: vsll.vi v8, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v8 +; RV32V-NEXT: vnsrl.wi v16, v8, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vluxei32.v v12, (a0), v16, v0.t ; RV32V-NEXT: vmv.v.v v8, v12 @@ -6301,7 +6301,7 @@ ; RV32V-NEXT: vsext.vf2 v16, v8 ; RV32V-NEXT: vsll.vi v8, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v8 +; RV32V-NEXT: vnsrl.wi v16, v8, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vluxei32.v v12, (a0), v16, v0.t ; RV32V-NEXT: vmv.v.v v8, v12 @@ -6583,7 +6583,7 @@ ; RV32V-NEXT: vzext.vf2 v16, v8 ; RV32V-NEXT: vsll.vi v8, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v8 +; RV32V-NEXT: vnsrl.wi v16, v8, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vluxei32.v v12, (a0), v16, v0.t ; RV32V-NEXT: vmv.v.v v8, v12 @@ -6872,7 +6872,7 @@ ; RV32V-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32V-NEXT: vsll.vi v8, v8, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v8 +; RV32V-NEXT: vnsrl.wi v16, v8, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vluxei32.v v12, (a0), v16, v0.t ; RV32V-NEXT: vmv.v.v v8, v12 @@ -10364,7 +10364,7 @@ ; RV32V-NEXT: vsext.vf8 v16, v8 ; RV32V-NEXT: vsll.vi v8, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v8 +; RV32V-NEXT: vnsrl.wi v16, v8, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vluxei32.v v12, (a0), v16, v0.t ; RV32V-NEXT: vmv.v.v v8, v12 @@ -10585,7 +10585,7 @@ ; RV32V-NEXT: vzext.vf8 v16, v8 ; RV32V-NEXT: vsll.vi v8, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v8 +; RV32V-NEXT: vnsrl.wi v16, v8, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vluxei32.v v12, (a0), v16, v0.t ; RV32V-NEXT: vmv.v.v v8, v12 @@ -11033,7 +11033,7 @@ ; RV32V-NEXT: vsext.vf4 v16, v8 ; RV32V-NEXT: vsll.vi v8, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v8 +; RV32V-NEXT: vnsrl.wi v16, v8, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vluxei32.v v12, (a0), v16, v0.t ; RV32V-NEXT: vmv.v.v v8, v12 @@ -11255,7 +11255,7 @@ ; RV32V-NEXT: vzext.vf4 v16, v8 ; RV32V-NEXT: vsll.vi v8, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; 
RV32V-NEXT: vncvt.x.x.w v16, v8 +; RV32V-NEXT: vnsrl.wi v16, v8, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vluxei32.v v12, (a0), v16, v0.t ; RV32V-NEXT: vmv.v.v v8, v12 @@ -11706,7 +11706,7 @@ ; RV32V-NEXT: vsext.vf2 v16, v8 ; RV32V-NEXT: vsll.vi v8, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v8 +; RV32V-NEXT: vnsrl.wi v16, v8, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vluxei32.v v12, (a0), v16, v0.t ; RV32V-NEXT: vmv.v.v v8, v12 @@ -11929,7 +11929,7 @@ ; RV32V-NEXT: vzext.vf2 v16, v8 ; RV32V-NEXT: vsll.vi v8, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v8 +; RV32V-NEXT: vnsrl.wi v16, v8, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vluxei32.v v12, (a0), v16, v0.t ; RV32V-NEXT: vmv.v.v v8, v12 @@ -12159,7 +12159,7 @@ ; RV32V-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32V-NEXT: vsll.vi v8, v8, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v8 +; RV32V-NEXT: vnsrl.wi v16, v8, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vluxei32.v v12, (a0), v16, v0.t ; RV32V-NEXT: vmv.v.v v8, v12 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll @@ -96,21 +96,21 @@ ; RV32V-LABEL: mscatter_v2i16_truncstore_v2i8: ; RV32V: # %bb.0: ; RV32V-NEXT: vsetivli zero, 2, e8, mf8, ta, mu -; RV32V-NEXT: vncvt.x.x.w v8, v8 +; RV32V-NEXT: vnsrl.wi v8, v8, 0 ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v2i16_truncstore_v2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v2i16_truncstore_v2i8: ; RV32ZVE32F: # %bb.0: ; RV32ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu -; RV32ZVE32F-NEXT: vncvt.x.x.w v8, v8 +; RV32ZVE32F-NEXT: vnsrl.wi v8, v8, 0 ; RV32ZVE32F-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32ZVE32F-NEXT: ret ; @@ -119,7 +119,7 @@ ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 -; RV64ZVE32F-NEXT: vncvt.x.x.w v8, v8 +; RV64ZVE32F-NEXT: vnsrl.wi v8, v8, 0 ; RV64ZVE32F-NEXT: bnez a3, .LBB2_3 ; RV64ZVE32F-NEXT: # %bb.1: # %else ; RV64ZVE32F-NEXT: andi a0, a2, 2 @@ -145,38 +145,38 @@ ; RV32V-LABEL: mscatter_v2i32_truncstore_v2i8: ; RV32V: # %bb.0: ; RV32V-NEXT: vsetivli zero, 2, e16, mf4, ta, mu -; RV32V-NEXT: vncvt.x.x.w v8, v8 +; RV32V-NEXT: vnsrl.wi v8, v8, 0 ; RV32V-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; RV32V-NEXT: vncvt.x.x.w v8, v8 +; RV32V-NEXT: vnsrl.wi v8, v8, 0 ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v2i32_truncstore_v2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v2i32_truncstore_v2i8: ; RV32ZVE32F: # %bb.0: ; RV32ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu -; RV32ZVE32F-NEXT: vncvt.x.x.w v8, v8 +; RV32ZVE32F-NEXT: vnsrl.wi v8, v8, 0 ; 
RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; RV32ZVE32F-NEXT: vncvt.x.x.w v8, v8 +; RV32ZVE32F-NEXT: vnsrl.wi v8, v8, 0 ; RV32ZVE32F-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_v2i32_truncstore_v2i8: ; RV64ZVE32F: # %bb.0: ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu -; RV64ZVE32F-NEXT: vncvt.x.x.w v8, v8 +; RV64ZVE32F-NEXT: vnsrl.wi v8, v8, 0 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 -; RV64ZVE32F-NEXT: vncvt.x.x.w v8, v8 +; RV64ZVE32F-NEXT: vnsrl.wi v8, v8, 0 ; RV64ZVE32F-NEXT: bnez a3, .LBB3_3 ; RV64ZVE32F-NEXT: # %bb.1: # %else ; RV64ZVE32F-NEXT: andi a0, a2, 2 @@ -202,22 +202,22 @@ ; RV32V-LABEL: mscatter_v2i64_truncstore_v2i8: ; RV32V: # %bb.0: ; RV32V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v8, v8 +; RV32V-NEXT: vnsrl.wi v8, v8, 0 ; RV32V-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; RV32V-NEXT: vncvt.x.x.w v8, v8 +; RV32V-NEXT: vnsrl.wi v8, v8, 0 ; RV32V-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; RV32V-NEXT: vncvt.x.x.w v8, v8 +; RV32V-NEXT: vnsrl.wi v8, v8, 0 ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v2i64_truncstore_v2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; @@ -721,28 +721,28 @@ ; RV32V-LABEL: mscatter_v2i32_truncstore_v2i16: ; RV32V: # %bb.0: ; RV32V-NEXT: vsetivli zero, 2, e16, mf4, ta, mu -; RV32V-NEXT: vncvt.x.x.w v8, v8 +; RV32V-NEXT: vnsrl.wi v8, v8, 0 ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v2i32_truncstore_v2i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v2i32_truncstore_v2i16: ; RV32ZVE32F: # %bb.0: ; RV32ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu -; RV32ZVE32F-NEXT: vncvt.x.x.w v8, v8 +; RV32ZVE32F-NEXT: vnsrl.wi v8, v8, 0 ; RV32ZVE32F-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_v2i32_truncstore_v2i16: ; RV64ZVE32F: # %bb.0: ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu -; RV64ZVE32F-NEXT: vncvt.x.x.w v8, v8 +; RV64ZVE32F-NEXT: vnsrl.wi v8, v8, 0 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 @@ -771,18 +771,18 @@ ; RV32V-LABEL: mscatter_v2i64_truncstore_v2i16: ; RV32V: # %bb.0: ; RV32V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v8, v8 +; RV32V-NEXT: vnsrl.wi v8, v8, 0 ; RV32V-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; RV32V-NEXT: vncvt.x.x.w v8, v8 +; RV32V-NEXT: vnsrl.wi v8, v8, 0 ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v2i64_truncstore_v2i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v9, 
v0.t ; RV64-NEXT: ret ; @@ -1701,14 +1701,14 @@ ; RV32V-LABEL: mscatter_v2i64_truncstore_v2i32: ; RV32V: # %bb.0: ; RV32V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v8, v8 +; RV32V-NEXT: vnsrl.wi v8, v8, 0 ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v2i64_truncstore_v2i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; @@ -3757,7 +3757,7 @@ ; RV32V-NEXT: vsext.vf8 v16, v12 ; RV32V-NEXT: vsll.vi v12, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v12 +; RV32V-NEXT: vnsrl.wi v16, v12, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32V-NEXT: ret @@ -4007,7 +4007,7 @@ ; RV32V-NEXT: vzext.vf8 v16, v12 ; RV32V-NEXT: vsll.vi v12, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v12 +; RV32V-NEXT: vnsrl.wi v16, v12, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32V-NEXT: ret @@ -4513,7 +4513,7 @@ ; RV32V-NEXT: vsext.vf4 v16, v12 ; RV32V-NEXT: vsll.vi v12, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v12 +; RV32V-NEXT: vnsrl.wi v16, v12, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32V-NEXT: ret @@ -4764,7 +4764,7 @@ ; RV32V-NEXT: vzext.vf4 v16, v12 ; RV32V-NEXT: vsll.vi v12, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v12 +; RV32V-NEXT: vnsrl.wi v16, v12, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32V-NEXT: ret @@ -5273,7 +5273,7 @@ ; RV32V-NEXT: vsext.vf2 v16, v12 ; RV32V-NEXT: vsll.vi v12, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v12 +; RV32V-NEXT: vnsrl.wi v16, v12, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32V-NEXT: ret @@ -5525,7 +5525,7 @@ ; RV32V-NEXT: vzext.vf2 v16, v12 ; RV32V-NEXT: vsll.vi v12, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v12 +; RV32V-NEXT: vnsrl.wi v16, v12, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32V-NEXT: ret @@ -5784,7 +5784,7 @@ ; RV32V-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32V-NEXT: vsll.vi v12, v12, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v12 +; RV32V-NEXT: vnsrl.wi v16, v12, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32V-NEXT: ret @@ -8882,7 +8882,7 @@ ; RV32V-NEXT: vsext.vf8 v16, v12 ; RV32V-NEXT: vsll.vi v12, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v12 +; RV32V-NEXT: vnsrl.wi v16, v12, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32V-NEXT: ret @@ -9088,7 +9088,7 @@ ; RV32V-NEXT: vzext.vf8 v16, v12 ; RV32V-NEXT: vsll.vi v12, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v12 +; RV32V-NEXT: vnsrl.wi v16, v12, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32V-NEXT: ret @@ -9506,7 +9506,7 @@ ; RV32V-NEXT: vsext.vf4 v16, v12 ; RV32V-NEXT: vsll.vi v12, 
v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v12 +; RV32V-NEXT: vnsrl.wi v16, v12, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32V-NEXT: ret @@ -9713,7 +9713,7 @@ ; RV32V-NEXT: vzext.vf4 v16, v12 ; RV32V-NEXT: vsll.vi v12, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v12 +; RV32V-NEXT: vnsrl.wi v16, v12, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32V-NEXT: ret @@ -10134,7 +10134,7 @@ ; RV32V-NEXT: vsext.vf2 v16, v12 ; RV32V-NEXT: vsll.vi v12, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v12 +; RV32V-NEXT: vnsrl.wi v16, v12, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32V-NEXT: ret @@ -10342,7 +10342,7 @@ ; RV32V-NEXT: vzext.vf2 v16, v12 ; RV32V-NEXT: vsll.vi v12, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v12 +; RV32V-NEXT: vnsrl.wi v16, v12, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32V-NEXT: ret @@ -10557,7 +10557,7 @@ ; RV32V-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32V-NEXT: vsll.vi v12, v12, 3 ; RV32V-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32V-NEXT: vncvt.x.x.w v16, v12 +; RV32V-NEXT: vnsrl.wi v16, v12, 0 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32V-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll @@ -205,7 +205,7 @@ ; CHECK-LABEL: vpmerge_vptrunc: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t +; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t ; CHECK-NEXT: ret ; MIR-LABEL: name: vpmerge_vptrunc ; MIR: bb.0 (%ir-block.0): @@ -216,7 +216,7 @@ ; MIR-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v10m2 ; MIR-NEXT: [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8 ; MIR-NEXT: $v0 = COPY [[COPY1]] - ; MIR-NEXT: early-clobber %4:vrnov0 = PseudoVNSRL_WX_M1_MASK [[COPY3]], [[COPY2]], $x0, $v0, [[COPY]], 5 /* e32 */, 0 + ; MIR-NEXT: early-clobber %4:vrnov0 = PseudoVNSRL_WI_M1_MASK [[COPY3]], [[COPY2]], 0, $v0, [[COPY]], 5 /* e32 */, 0 ; MIR-NEXT: $v8 = COPY %4 ; MIR-NEXT: PseudoRET implicit $v8 %splat = insertelement <8 x i1> poison, i1 -1, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll @@ -965,7 +965,7 @@ ; RV32-NEXT: vsext.vf8 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v12, v8 +; RV32-NEXT: vnsrl.wi v12, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret @@ -991,7 +991,7 @@ ; RV32-NEXT: vzext.vf8 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v12, v8 +; RV32-NEXT: vnsrl.wi v12, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret @@ -1040,7 +1040,7 @@ ; RV32-NEXT: vsext.vf4 v12, v8 ; 
RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v12, v8 +; RV32-NEXT: vnsrl.wi v12, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret @@ -1066,7 +1066,7 @@ ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v12, v8 +; RV32-NEXT: vnsrl.wi v12, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret @@ -1114,7 +1114,7 @@ ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v12, v8 +; RV32-NEXT: vnsrl.wi v12, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret @@ -1140,7 +1140,7 @@ ; RV32-NEXT: vzext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v12, v8 +; RV32-NEXT: vnsrl.wi v12, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret @@ -1165,7 +1165,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v12, v8 +; RV32-NEXT: vnsrl.wi v12, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret @@ -1702,7 +1702,7 @@ ; RV32-NEXT: vsext.vf8 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v12, v8 +; RV32-NEXT: vnsrl.wi v12, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret @@ -1728,7 +1728,7 @@ ; RV32-NEXT: vzext.vf8 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v12, v8 +; RV32-NEXT: vnsrl.wi v12, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret @@ -1777,7 +1777,7 @@ ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v12, v8 +; RV32-NEXT: vnsrl.wi v12, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret @@ -1803,7 +1803,7 @@ ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v12, v8 +; RV32-NEXT: vnsrl.wi v12, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret @@ -1851,7 +1851,7 @@ ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v12, v8 +; RV32-NEXT: vnsrl.wi v12, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret @@ -1877,7 +1877,7 @@ ; RV32-NEXT: vzext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v12, v8 +; RV32-NEXT: vnsrl.wi v12, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret @@ -1902,7 +1902,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v12, v8 +; 
RV32-NEXT: vnsrl.wi v12, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret @@ -2055,7 +2055,7 @@ ; RV32-NEXT: vsext.vf8 v24, v8 ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v12, v16 +; RV32-NEXT: vnsrl.wi v12, v16, 0 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32-NEXT: vslidedown.vi v0, v10, 2 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu @@ -2068,7 +2068,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v24, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v4, v24 +; RV32-NEXT: vnsrl.wi v4, v24, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v10 ; RV32-NEXT: vluxei32.v v8, (a0), v4, v0.t @@ -2128,7 +2128,7 @@ ; RV32-NEXT: vzext.vf8 v24, v8 ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v12, v16 +; RV32-NEXT: vnsrl.wi v12, v16, 0 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32-NEXT: vslidedown.vi v0, v10, 2 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu @@ -2141,7 +2141,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v24, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v4, v24 +; RV32-NEXT: vnsrl.wi v4, v24, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v10 ; RV32-NEXT: vluxei32.v v8, (a0), v4, v0.t @@ -2265,7 +2265,7 @@ ; RV32-NEXT: vsext.vf4 v24, v8 ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v16 +; RV32-NEXT: vnsrl.wi v8, v16, 0 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32-NEXT: vslidedown.vi v0, v12, 2 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu @@ -2278,7 +2278,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v24, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v4, v24 +; RV32-NEXT: vnsrl.wi v4, v24, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v12 ; RV32-NEXT: vluxei32.v v8, (a0), v4, v0.t @@ -2338,7 +2338,7 @@ ; RV32-NEXT: vzext.vf4 v24, v8 ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v16 +; RV32-NEXT: vnsrl.wi v8, v16, 0 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32-NEXT: vslidedown.vi v0, v12, 2 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu @@ -2351,7 +2351,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v24, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v4, v24 +; RV32-NEXT: vnsrl.wi v4, v24, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v12 ; RV32-NEXT: vluxei32.v v8, (a0), v4, v0.t @@ -2474,7 +2474,7 @@ ; RV32-NEXT: vsext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v4, v8 +; RV32-NEXT: vnsrl.wi v4, v8, 0 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32-NEXT: vslidedown.vi v0, v1, 2 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu @@ -2487,7 +2487,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v8 +; RV32-NEXT: vnsrl.wi v24, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v1 ; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t @@ -2547,7 +2547,7 @@ ; RV32-NEXT: vzext.vf2 v24, v8 ; 
RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v4, v8 +; RV32-NEXT: vnsrl.wi v4, v8, 0 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32-NEXT: vslidedown.vi v0, v1, 2 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu @@ -2560,7 +2560,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v8 +; RV32-NEXT: vnsrl.wi v24, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v1 ; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t @@ -2616,7 +2616,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v28, v16 +; RV32-NEXT: vnsrl.wi v28, v16, 0 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32-NEXT: vslidedown.vi v0, v24, 2 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu @@ -2629,7 +2629,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v28, v8 +; RV32-NEXT: vnsrl.wi v28, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v24 ; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll @@ -26,7 +26,7 @@ ; RV32-LABEL: vpscatter_v2i16_truncstore_v2i8: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret @@ -34,7 +34,7 @@ ; RV64-LABEL: vpscatter_v2i16_truncstore_v2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret @@ -47,9 +47,9 @@ ; RV32-LABEL: vpscatter_v2i32_truncstore_v2i8: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret @@ -57,9 +57,9 @@ ; RV64-LABEL: vpscatter_v2i32_truncstore_v2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret @@ -72,11 +72,11 @@ ; RV32-LABEL: vpscatter_v2i64_truncstore_v2i8: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret @@ -84,11 +84,11 @@ 
; RV64-LABEL: vpscatter_v2i64_truncstore_v2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret @@ -194,7 +194,7 @@ ; RV32-LABEL: vpscatter_v2i32_truncstore_v2i16: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret @@ -202,7 +202,7 @@ ; RV64-LABEL: vpscatter_v2i32_truncstore_v2i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret @@ -215,9 +215,9 @@ ; RV32-LABEL: vpscatter_v2i64_truncstore_v2i16: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret @@ -225,9 +225,9 @@ ; RV64-LABEL: vpscatter_v2i64_truncstore_v2i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret @@ -442,7 +442,7 @@ ; RV32-LABEL: vpscatter_v2i64_truncstore_v2i32: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret @@ -450,7 +450,7 @@ ; RV64-LABEL: vpscatter_v2i64_truncstore_v2i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret @@ -779,7 +779,7 @@ ; RV32-NEXT: vsext.vf8 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v12 +; RV32-NEXT: vnsrl.wi v16, v12, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -805,7 +805,7 @@ ; RV32-NEXT: vzext.vf8 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v12 +; RV32-NEXT: vnsrl.wi v16, v12, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -854,7 +854,7 @@ ; RV32-NEXT: vsext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v12 +; RV32-NEXT: vnsrl.wi v16, v12, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -880,7 
+880,7 @@ ; RV32-NEXT: vzext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v12 +; RV32-NEXT: vnsrl.wi v16, v12, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -928,7 +928,7 @@ ; RV32-NEXT: vsext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v12 +; RV32-NEXT: vnsrl.wi v16, v12, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -954,7 +954,7 @@ ; RV32-NEXT: vzext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v12 +; RV32-NEXT: vnsrl.wi v16, v12, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -979,7 +979,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vsll.vi v12, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v12 +; RV32-NEXT: vnsrl.wi v16, v12, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1500,7 +1500,7 @@ ; RV32-NEXT: vsext.vf8 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v12 +; RV32-NEXT: vnsrl.wi v16, v12, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1526,7 +1526,7 @@ ; RV32-NEXT: vzext.vf8 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v12 +; RV32-NEXT: vnsrl.wi v16, v12, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1575,7 +1575,7 @@ ; RV32-NEXT: vsext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v12 +; RV32-NEXT: vnsrl.wi v16, v12, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1601,7 +1601,7 @@ ; RV32-NEXT: vzext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v12 +; RV32-NEXT: vnsrl.wi v16, v12, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1649,7 +1649,7 @@ ; RV32-NEXT: vsext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v12 +; RV32-NEXT: vnsrl.wi v16, v12, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1675,7 +1675,7 @@ ; RV32-NEXT: vzext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v12 +; RV32-NEXT: vnsrl.wi v16, v12, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1700,7 +1700,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vsll.vi v12, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v12 +; RV32-NEXT: vnsrl.wi v16, v12, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1924,7 +1924,7 @@ ; RV32-NEXT: vsext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; 
RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: addi a1, a2, -16 ; RV32-NEXT: addi a4, sp, 16 @@ -1937,7 +1937,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu @@ -2058,7 +2058,7 @@ ; RV32-NEXT: vzext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: addi a1, a2, -16 ; RV32-NEXT: addi a4, sp, 16 @@ -2071,7 +2071,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll @@ -408,7 +408,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v12 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-NEXT: vncvt.x.x.w v12, v16 +; CHECK-NEXT: vnsrl.wi v12, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu @@ -431,7 +431,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v24, v16 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v16, v24 +; CHECK-NEXT: vnsrl.wi v16, v24, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll @@ -249,7 +249,7 @@ ; CHECK32-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK32-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK32-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK32-NEXT: vncvt.x.x.w v8, v12 +; CHECK32-NEXT: vnsrl.wi v8, v12, 0 ; CHECK32-NEXT: ret ; ; CHECK64-LABEL: test_signed_v4f64_v4i16: @@ -263,7 +263,7 @@ ; CHECK64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK64-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK64-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK64-NEXT: vncvt.x.x.w v8, v12 +; CHECK64-NEXT: vnsrl.wi v8, v12, 0 ; CHECK64-NEXT: ret %x = call @llvm.fptoui.sat.nxv4f64.nxv4i16( %f) ret %x @@ -281,7 +281,7 @@ ; CHECK32-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK32-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK32-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK32-NEXT: vncvt.x.x.w v8, v16 +; CHECK32-NEXT: vnsrl.wi v8, v16, 0 ; CHECK32-NEXT: ret ; ; CHECK64-LABEL: test_signed_v8f64_v8i16: @@ -295,7 +295,7 @@ ; CHECK64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK64-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK64-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK64-NEXT: vncvt.x.x.w v8, v16 +; CHECK64-NEXT: vnsrl.wi v8, v16, 0 ; CHECK64-NEXT: ret %x = call 
@llvm.fptoui.sat.nxv8f64.nxv8i16( %f) ret %x diff --git a/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll b/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll --- a/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll +++ b/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll @@ -6,9 +6,9 @@ ; CHECK-LABEL: trunc_nxv4i32_to_nxv4i5: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu -; CHECK-NEXT: vncvt.x.x.w v10, v8 +; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %v = trunc %a to ret %v @@ -18,9 +18,9 @@ ; CHECK-LABEL: trunc_nxv1i32_to_nxv1i5: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = trunc %a to ret %v diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll @@ -1038,7 +1038,7 @@ ; RV32-NEXT: vsext.vf8 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v8 +; RV32-NEXT: vnsrl.wi v24, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1065,7 +1065,7 @@ ; RV32-NEXT: vzext.vf8 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v8 +; RV32-NEXT: vnsrl.wi v24, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1116,7 +1116,7 @@ ; RV32-NEXT: vsext.vf4 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v8 +; RV32-NEXT: vnsrl.wi v24, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1143,7 +1143,7 @@ ; RV32-NEXT: vzext.vf4 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v8 +; RV32-NEXT: vnsrl.wi v24, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1193,7 +1193,7 @@ ; RV32-NEXT: vsext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v8 +; RV32-NEXT: vnsrl.wi v24, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1220,7 +1220,7 @@ ; RV32-NEXT: vzext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v8 +; RV32-NEXT: vnsrl.wi v24, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1246,7 +1246,7 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v8 +; RV32-NEXT: vnsrl.wi v24, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1969,7 +1969,7 @@ ; RV32-NEXT: 
vsext.vf8 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v8 +; RV32-NEXT: vnsrl.wi v24, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1996,7 +1996,7 @@ ; RV32-NEXT: vzext.vf8 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v8 +; RV32-NEXT: vnsrl.wi v24, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -2047,7 +2047,7 @@ ; RV32-NEXT: vsext.vf4 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v8 +; RV32-NEXT: vnsrl.wi v24, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -2074,7 +2074,7 @@ ; RV32-NEXT: vzext.vf4 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v8 +; RV32-NEXT: vnsrl.wi v24, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -2124,7 +2124,7 @@ ; RV32-NEXT: vsext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v8 +; RV32-NEXT: vnsrl.wi v24, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -2151,7 +2151,7 @@ ; RV32-NEXT: vzext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v8 +; RV32-NEXT: vnsrl.wi v24, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -2177,7 +2177,7 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v8 +; RV32-NEXT: vnsrl.wi v24, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll @@ -44,14 +44,14 @@ ; RV32-LABEL: mscatter_nxv2i16_truncstore_nxv2i8: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i16_truncstore_nxv2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret %tval = trunc %val to @@ -63,18 +63,18 @@ ; RV32-LABEL: mscatter_nxv2i32_truncstore_nxv2i8: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, 
v8, 0 ; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret %tval = trunc %val to @@ -86,22 +86,22 @@ ; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i8: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; RV32-NEXT: vncvt.x.x.w v11, v8 +; RV32-NEXT: vnsrl.wi v11, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v11 +; RV32-NEXT: vnsrl.wi v8, v11, 0 ; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; RV64-NEXT: vncvt.x.x.w v12, v8 +; RV64-NEXT: vnsrl.wi v12, v8, 0 ; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v12 +; RV64-NEXT: vnsrl.wi v8, v12, 0 ; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret %tval = trunc %val to @@ -232,14 +232,14 @@ ; RV32-LABEL: mscatter_nxv2i32_truncstore_nxv2i16: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret %tval = trunc %val to @@ -251,18 +251,18 @@ ; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i16: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; RV32-NEXT: vncvt.x.x.w v11, v8 +; RV32-NEXT: vnsrl.wi v11, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v11 +; RV32-NEXT: vnsrl.wi v8, v11, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; RV64-NEXT: vncvt.x.x.w v12, v8 +; RV64-NEXT: vnsrl.wi v12, v8, 0 ; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v12 +; RV64-NEXT: vnsrl.wi v8, v12, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret %tval = trunc %val to @@ -466,14 +466,14 @@ ; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i32: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; RV32-NEXT: vncvt.x.x.w v11, v8 +; RV32-NEXT: vnsrl.wi v11, v8, 0 ; RV32-NEXT: vsoxei32.v v11, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; RV64-NEXT: vncvt.x.x.w v12, v8 +; RV64-NEXT: vnsrl.wi v12, v8, 0 ; RV64-NEXT: vsoxei64.v v12, (zero), v10, v0.t ; RV64-NEXT: ret %tval = trunc %val to @@ -827,7 +827,7 @@ ; RV32-NEXT: vsext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -852,7 +852,7 @@ ; RV32-NEXT: vzext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: 
vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -899,7 +899,7 @@ ; RV32-NEXT: vsext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -924,7 +924,7 @@ ; RV32-NEXT: vzext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -970,7 +970,7 @@ ; RV32-NEXT: vsext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -995,7 +995,7 @@ ; RV32-NEXT: vzext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1019,7 +1019,7 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1609,7 +1609,7 @@ ; RV32-NEXT: vsext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1634,7 +1634,7 @@ ; RV32-NEXT: vzext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1681,7 +1681,7 @@ ; RV32-NEXT: vsext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1706,7 +1706,7 @@ ; RV32-NEXT: vzext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1752,7 +1752,7 @@ ; RV32-NEXT: vsext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1777,7 +1777,7 @@ ; RV32-NEXT: vzext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; 
RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1801,7 +1801,7 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll --- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll @@ -229,7 +229,7 @@ ; CHECK-LABEL: vpmerge_vptrunc: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t +; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t ; CHECK-NEXT: ret ; MIR-LABEL: name: vpmerge_vptrunc ; MIR: bb.0 (%ir-block.0): @@ -240,7 +240,7 @@ ; MIR-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v10m2 ; MIR-NEXT: [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8 ; MIR-NEXT: $v0 = COPY [[COPY1]] - ; MIR-NEXT: early-clobber %4:vrnov0 = PseudoVNSRL_WX_M1_MASK [[COPY3]], [[COPY2]], $x0, $v0, [[COPY]], 5 /* e32 */, 0 + ; MIR-NEXT: early-clobber %4:vrnov0 = PseudoVNSRL_WI_M1_MASK [[COPY3]], [[COPY2]], 0, $v0, [[COPY]], 5 /* e32 */, 0 ; MIR-NEXT: $v8 = COPY %4 ; MIR-NEXT: PseudoRET implicit $v8 %splat = insertelement poison, i1 -1, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll @@ -658,7 +658,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v9 +; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -670,7 +670,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v9 +; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -770,7 +770,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v9 +; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -782,7 +782,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v9 +; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -882,7 +882,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -894,7 +894,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -994,7 +994,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v12 +; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec 
@@ -1006,7 +1006,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v12 +; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1106,7 +1106,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v16 +; CHECK-NEXT: vnsrl.wi v8, v16, 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1118,7 +1118,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v16 +; CHECK-NEXT: vnsrl.wi v8, v16, 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1196,9 +1196,9 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v9 +; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1210,9 +1210,9 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v9 +; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1224,7 +1224,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v9 +; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1236,7 +1236,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v9 +; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1314,9 +1314,9 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1328,9 +1328,9 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1342,7 +1342,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1354,7 +1354,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1432,9 
+1432,9 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v12 +; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1446,9 +1446,9 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v12 +; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1460,7 +1460,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v12 +; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1472,7 +1472,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v12 +; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1550,9 +1550,9 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v10, v16 +; CHECK-NEXT: vnsrl.wi v10, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1564,9 +1564,9 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v10, v16 +; CHECK-NEXT: vnsrl.wi v10, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1578,7 +1578,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v16 +; CHECK-NEXT: vnsrl.wi v8, v16, 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1590,7 +1590,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v16 +; CHECK-NEXT: vnsrl.wi v8, v16, 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll @@ -119,7 +119,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v9, v0.t +; CHECK-NEXT: vnsrl.wi v8, v9, 0, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.fptosi.nxv2i8.nxv2f32( %va, %m, i32 %evl) ret %v @@ -131,7 +131,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v9 +; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fptosi.nxv2i8.nxv2f32( %va, shufflevector ( insertelement ( 
undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) ret %v @@ -215,9 +215,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t +; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t +; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.fptosi.nxv2i8.nxv2f64( %va, %m, i32 %evl) ret %v @@ -229,9 +229,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fptosi.nxv2i8.nxv2f64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) ret %v @@ -245,7 +245,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t +; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.fptosi.nxv2i16.nxv2f64( %va, %m, i32 %evl) ret %v @@ -257,7 +257,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fptosi.nxv2i16.nxv2f64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) ret %v diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll @@ -119,7 +119,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v9, v0.t +; CHECK-NEXT: vnsrl.wi v8, v9, 0, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.fptoui.nxv2i8.nxv2f32( %va, %m, i32 %evl) ret %v @@ -131,7 +131,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v9 +; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fptoui.nxv2i8.nxv2f32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) ret %v @@ -215,9 +215,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t +; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t +; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.fptoui.nxv2i8.nxv2f64( %va, %m, i32 %evl) ret %v @@ -229,9 +229,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fptoui.nxv2i8.nxv2f64( %va, 
shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) ret %v @@ -245,7 +245,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t +; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t ; CHECK-NEXT: ret %v = call @llvm.vp.fptoui.nxv2i16.nxv2f64( %va, %m, i32 %evl) ret %v @@ -257,7 +257,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fptoui.nxv2i16.nxv2f64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) ret %v diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll @@ -1056,7 +1056,7 @@ ; RV32-NEXT: vsext.vf8 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1082,7 +1082,7 @@ ; RV32-NEXT: vzext.vf8 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1131,7 +1131,7 @@ ; RV32-NEXT: vsext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1157,7 +1157,7 @@ ; RV32-NEXT: vzext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1205,7 +1205,7 @@ ; RV32-NEXT: vsext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1231,7 +1231,7 @@ ; RV32-NEXT: vzext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1256,7 +1256,7 @@ ; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1851,7 +1851,7 @@ ; RV32-NEXT: vsext.vf8 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1877,7 +1877,7 
@@ ; RV32-NEXT: vzext.vf8 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1926,7 +1926,7 @@ ; RV32-NEXT: vsext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1952,7 +1952,7 @@ ; RV32-NEXT: vzext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2000,7 +2000,7 @@ ; RV32-NEXT: vsext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2026,7 +2026,7 @@ ; RV32-NEXT: vzext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2051,7 +2051,7 @@ ; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2117,7 +2117,7 @@ ; RV32-NEXT: vsext.vf8 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2143,7 +2143,7 @@ ; RV32-NEXT: vzext.vf8 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2192,7 +2192,7 @@ ; RV32-NEXT: vsext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2218,7 +2218,7 @@ ; RV32-NEXT: vzext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2266,7 +2266,7 @@ ; RV32-NEXT: vsext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2292,7 +2292,7 @@ ; RV32-NEXT: vzext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: 
vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2317,7 +2317,7 @@ ; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2466,7 +2466,7 @@ ; RV32-NEXT: vsext.vf4 v24, v10 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: srli a3, a2, 3 @@ -2480,7 +2480,7 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a4, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: ret @@ -2536,7 +2536,7 @@ ; RV32-NEXT: vzext.vf4 v24, v10 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v16, v8 +; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: srli a3, a2, 3 @@ -2550,7 +2550,7 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a4, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll @@ -44,7 +44,7 @@ ; RV32-LABEL: vpscatter_nxv2i16_truncstore_nxv2i8: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret @@ -52,7 +52,7 @@ ; RV64-LABEL: vpscatter_nxv2i16_truncstore_nxv2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret @@ -65,9 +65,9 @@ ; RV32-LABEL: vpscatter_nxv2i32_truncstore_nxv2i8: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret @@ -75,9 +75,9 @@ ; RV64-LABEL: vpscatter_nxv2i32_truncstore_nxv2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret @@ -90,11 +90,11 @@ ; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i8: ; 
RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; RV32-NEXT: vncvt.x.x.w v11, v8 +; RV32-NEXT: vnsrl.wi v11, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v11 +; RV32-NEXT: vnsrl.wi v8, v11, 0 ; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret @@ -102,11 +102,11 @@ ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; RV64-NEXT: vncvt.x.x.w v12, v8 +; RV64-NEXT: vnsrl.wi v12, v8, 0 ; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v12 +; RV64-NEXT: vnsrl.wi v8, v12, 0 ; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret @@ -230,7 +230,7 @@ ; RV32-LABEL: vpscatter_nxv2i32_truncstore_nxv2i16: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v8 +; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret @@ -238,7 +238,7 @@ ; RV64-LABEL: vpscatter_nxv2i32_truncstore_nxv2i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v8 +; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret @@ -251,9 +251,9 @@ ; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i16: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; RV32-NEXT: vncvt.x.x.w v11, v8 +; RV32-NEXT: vnsrl.wi v11, v8, 0 ; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; RV32-NEXT: vncvt.x.x.w v8, v11 +; RV32-NEXT: vnsrl.wi v8, v11, 0 ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret @@ -261,9 +261,9 @@ ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; RV64-NEXT: vncvt.x.x.w v12, v8 +; RV64-NEXT: vnsrl.wi v12, v8, 0 ; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; RV64-NEXT: vncvt.x.x.w v8, v12 +; RV64-NEXT: vnsrl.wi v8, v12, 0 ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret @@ -460,7 +460,7 @@ ; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i32: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; RV32-NEXT: vncvt.x.x.w v11, v8 +; RV32-NEXT: vnsrl.wi v11, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV32-NEXT: vsoxei32.v v11, (zero), v10, v0.t ; RV32-NEXT: ret @@ -468,7 +468,7 @@ ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; RV64-NEXT: vncvt.x.x.w v12, v8 +; RV64-NEXT: vnsrl.wi v12, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vsoxei64.v v12, (zero), v10, v0.t ; RV64-NEXT: ret @@ -815,7 +815,7 @@ ; RV32-NEXT: vsext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -841,7 +841,7 @@ ; RV32-NEXT: vzext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: 
vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -890,7 +890,7 @@ ; RV32-NEXT: vsext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -916,7 +916,7 @@ ; RV32-NEXT: vzext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -964,7 +964,7 @@ ; RV32-NEXT: vsext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -990,7 +990,7 @@ ; RV32-NEXT: vzext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1015,7 +1015,7 @@ ; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1590,7 +1590,7 @@ ; RV32-NEXT: vsext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1616,7 +1616,7 @@ ; RV32-NEXT: vzext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1665,7 +1665,7 @@ ; RV32-NEXT: vsext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1691,7 +1691,7 @@ ; RV32-NEXT: vzext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1739,7 +1739,7 @@ ; RV32-NEXT: vsext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1765,7 +1765,7 @@ ; RV32-NEXT: vzext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vncvt.x.x.w v24, v16 +; RV32-NEXT: vnsrl.wi 
v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1790,7 +1790,7 @@
; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1855,7 +1855,7 @@
; RV32-NEXT: vsext.vf8 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1881,7 +1881,7 @@
; RV32-NEXT: vzext.vf8 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1930,7 +1930,7 @@
; RV32-NEXT: vsext.vf4 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1956,7 +1956,7 @@
; RV32-NEXT: vzext.vf4 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2004,7 +2004,7 @@
; RV32-NEXT: vsext.vf2 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2030,7 +2030,7 @@
; RV32-NEXT: vzext.vf2 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2055,7 +2055,7 @@
; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2232,7 +2232,7 @@
; RV32-NEXT: vsext.vf4 v16, v26
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v8
+; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: addi a3, sp, 16
; RV32-NEXT: vl8re8.v v8, (a3) # Unknown-size Folded Reload
@@ -2248,7 +2248,7 @@
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a4, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v8
+; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 3
@@ -2348,7 +2348,7 @@
; RV32-NEXT: vzext.vf4 v16, v26
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v8
+; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: addi a3, sp, 16
; RV32-NEXT: vl8re8.v v8, (a3) # Unknown-size Folded Reload
@@ -2364,7 +2364,7 @@
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a4, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v8
+; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
@@ -8,7 +8,7 @@
; CHECK-LABEL: vtrunc_nxv2i7_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t
+; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.trunc.nxv2i7.nxv2i16( %a, %m, i32 %vl)
ret %v
@@ -20,7 +20,7 @@
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i15:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t
+; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.trunc.nxv2i8.nxv2i15( %a, %m, i32 %vl)
ret %v
@@ -32,7 +32,7 @@
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t
+; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.trunc.nxv2i8.nxv2i16( %a, %m, i32 %vl)
ret %v
@@ -42,7 +42,7 @@
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = call @llvm.vp.trunc.nxv2i8.nxv2i16( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl)
ret %v
@@ -54,9 +54,9 @@
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t
+; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t
+; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.trunc.nxv2i8.nxv2i32( %a, %m, i32 %vl)
ret %v
@@ -66,9 +66,9 @@
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = call @llvm.vp.trunc.nxv2i8.nxv2i32( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl)
ret %v
@@ -80,11 +80,11 @@
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v10, v8, v0.t
+; CHECK-NEXT: vnsrl.wi v10, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t
+; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t
+; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.trunc.nxv2i8.nxv2i64( %a, %m, i32 %vl)
ret %v
@@ -94,11 +94,11 @@
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v10, v8
+; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = call @llvm.vp.trunc.nxv2i8.nxv2i64( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl)
ret %v
@@ -110,7 +110,7 @@
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t
+; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.trunc.nxv2i16.nxv2i32( %a, %m, i32 %vl)
ret %v
@@ -120,7 +120,7 @@
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = call @llvm.vp.trunc.nxv2i16.nxv2i32( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl)
ret %v
@@ -132,9 +132,9 @@
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v10, v8, v0.t
+; CHECK-NEXT: vnsrl.wi v10, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t
+; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT: ret
%v = call @llvm.vp.trunc.nxv2i16.nxv2i64( %a, %m, i32 %vl)
ret %v
@@ -144,9 +144,9 @@
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v10, v8
+; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%v = call @llvm.vp.trunc.nxv2i16.nxv2i64( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl)
ret %v
@@ -169,18 +169,18 @@
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB12_2:
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v28, v16, v0.t
+; CHECK-NEXT: vnsrl.wi v28, v16, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v18, v28, v0.t
+; CHECK-NEXT: vnsrl.wi v18, v28, 0, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB12_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB12_4:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vncvt.x.x.w v20, v8, v0.t
+; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v16, v20, v0.t
+; CHECK-NEXT: vnsrl.wi v16, v20, 0, v0.t
; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
%v = call @llvm.vp.trunc.nxv15i16.nxv15i64( %a, %m, i32 %vl)
@@ -193,7 +193,7 @@
; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v10, v8, v0.t
+; CHECK-NEXT: vnsrl.wi v10, v8, 0, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%v = call @llvm.vp.trunc.nxv2i64.nxv2i32( %a, %m, i32 %vl)
@@ -204,7 +204,7 @@
; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v10, v8
+; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%v = call @llvm.vp.trunc.nxv2i64.nxv2i32( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl)
@@ -229,18 +229,18 @@
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB15_2:
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v28, v16, v0.t
+; CHECK-NEXT: vnsrl.wi v28, v16, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v18, v28, v0.t
+; CHECK-NEXT: vnsrl.wi v18, v28, 0, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB15_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB15_4:
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vncvt.x.x.w v20, v8, v0.t
+; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v16, v20, v0.t
+; CHECK-NEXT: vnsrl.wi v16, v20, 0, v0.t
; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
%v = call @llvm.vp.trunc.nxv32i7.nxv32i32( %a, %m, i32 %vl)
@@ -265,18 +265,18 @@
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB16_2:
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v28, v16, v0.t
+; CHECK-NEXT: vnsrl.wi v28, v16, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v18, v28, v0.t
+; CHECK-NEXT: vnsrl.wi v18, v28, 0, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB16_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB16_4:
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vncvt.x.x.w v20, v8, v0.t
+; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v16, v20, v0.t
+; CHECK-NEXT: vnsrl.wi v16, v20, 0, v0.t
; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
%v = call @llvm.vp.trunc.nxv32i8.nxv32i32( %a, %m, i32 %vl)
@@ -318,7 +318,7 @@
; CHECK-NEXT: srli a7, a1, 2
; CHECK-NEXT: slli t0, a1, 3
; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v12, v16, v0.t
+; CHECK-NEXT: vnsrl.wi v12, v16, 0, v0.t
; CHECK-NEXT: bltu a5, a1, .LBB17_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a5, a1
@@ -335,7 +335,7 @@
; CHECK-NEXT: add a5, sp, a5
; CHECK-NEXT: addi a5, a5, 16
; CHECK-NEXT: vl8re8.v v16, (a5) # Unknown-size Folded Reload
-; CHECK-NEXT: vncvt.x.x.w v8, v16, v0.t
+; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t
; CHECK-NEXT: bltu a2, a4, .LBB17_8
; CHECK-NEXT: # %bb.7:
; CHECK-NEXT: mv a6, a4
@@ -363,7 +363,7 @@
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vncvt.x.x.w v20, v24, v0.t
+; CHECK-NEXT: vnsrl.wi v20, v24, 0, v0.t
; CHECK-NEXT: bltu a6, a1, .LBB17_12
; CHECK-NEXT: # %bb.11:
; CHECK-NEXT: mv a6, a1
@@ -372,7 +372,7 @@
; CHECK-NEXT: vmv1r.v v0, v1
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vncvt.x.x.w v16, v24, v0.t
+; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll
@@ -6,7 +6,7 @@
; CHECK-LABEL: vtrunc_nxv1i16_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc %va to
ret %tvec
@@ -16,7 +16,7 @@
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc %va to
ret %tvec
@@ -26,7 +26,7 @@
; CHECK-LABEL: vtrunc_nxv4i16_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc %va to
ret %tvec
@@ -36,7 +36,7 @@
; CHECK-LABEL: vtrunc_nxv8i16_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v10, v8
+; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%tvec = trunc %va to
@@ -47,7 +47,7 @@
; CHECK-LABEL: vtrunc_nxv16i16_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v12, v8
+; CHECK-NEXT: vnsrl.wi v12, v8, 0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%tvec = trunc %va to
@@ -58,9 +58,9 @@
; CHECK-LABEL: vtrunc_nxv1i32_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc %va to
ret %tvec
@@ -70,7 +70,7 @@
; CHECK-LABEL: vtrunc_nxv1i32_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc %va to
ret %tvec
@@ -80,9 +80,9 @@
; CHECK-LABEL: vtrunc_nxv2i32_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc %va to
ret %tvec
@@ -92,7 +92,7 @@
; CHECK-LABEL: vtrunc_nxv2i32_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc %va to
ret %tvec
@@ -102,9 +102,9 @@
; CHECK-LABEL: vtrunc_nxv4i32_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v10, v8
+; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%tvec = trunc %va to
ret %tvec
@@ -114,7 +114,7 @@
; CHECK-LABEL: vtrunc_nxv4i32_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v10, v8
+; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%tvec = trunc %va to
@@ -125,9 +125,9 @@
; CHECK-LABEL: vtrunc_nxv8i32_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v12, v8
+; CHECK-NEXT: vnsrl.wi v12, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v12
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: ret
%tvec = trunc %va to
ret %tvec
@@ -137,7 +137,7 @@
; CHECK-LABEL: vtrunc_nxv8i32_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v12, v8
+; CHECK-NEXT: vnsrl.wi v12, v8, 0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%tvec = trunc %va to
@@ -148,9 +148,9 @@
; CHECK-LABEL: vtrunc_nxv16i32_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v16, v8
+; CHECK-NEXT: vnsrl.wi v16, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v16
+; CHECK-NEXT: vnsrl.wi v8, v16, 0
; CHECK-NEXT: ret
%tvec = trunc %va to
ret %tvec
@@ -160,7 +160,7 @@
; CHECK-LABEL: vtrunc_nxv16i32_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v16, v8
+; CHECK-NEXT: vnsrl.wi v16, v8, 0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%tvec = trunc %va to
@@ -171,11 +171,11 @@
; CHECK-LABEL: vtrunc_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc %va to
ret %tvec
@@ -185,9 +185,9 @@
; CHECK-LABEL: vtrunc_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc %va to
ret %tvec
@@ -197,7 +197,7 @@
; CHECK-LABEL: vtrunc_nxv1i64_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc %va to
ret %tvec
@@ -207,11 +207,11 @@
; CHECK-LABEL: vtrunc_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v10, v8
+; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc %va to
ret %tvec
@@ -221,9 +221,9 @@
; CHECK-LABEL: vtrunc_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v10, v8
+; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%tvec = trunc %va to
ret %tvec
@@ -233,7 +233,7 @@
; CHECK-LABEL: vtrunc_nxv2i64_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v10, v8
+; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%tvec = trunc %va to
@@ -244,11 +244,11 @@
; CHECK-LABEL: vtrunc_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v12, v8
+; CHECK-NEXT: vnsrl.wi v12, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v12
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc %va to
ret %tvec
@@ -258,9 +258,9 @@
; CHECK-LABEL: vtrunc_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v12, v8
+; CHECK-NEXT: vnsrl.wi v12, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v12
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: ret
%tvec = trunc %va to
ret %tvec
@@ -270,7 +270,7 @@
; CHECK-LABEL: vtrunc_nxv4i64_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v12, v8
+; CHECK-NEXT: vnsrl.wi v12, v8, 0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%tvec = trunc %va to
@@ -281,11 +281,11 @@
; CHECK-LABEL: vtrunc_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v16, v8
+; CHECK-NEXT: vnsrl.wi v16, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v10, v16
+; CHECK-NEXT: vnsrl.wi v10, v16, 0
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%tvec = trunc %va to
ret %tvec
@@ -295,9 +295,9 @@
; CHECK-LABEL: vtrunc_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v16, v8
+; CHECK-NEXT: vnsrl.wi v16, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v16
+; CHECK-NEXT: vnsrl.wi v8, v16, 0
; CHECK-NEXT: ret
%tvec = trunc %va to
ret %tvec
@@ -307,7 +307,7 @@
; CHECK-LABEL: vtrunc_nxv8i64_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v16, v8
+; CHECK-NEXT: vnsrl.wi v16, v8, 0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%tvec = trunc %va to