diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.td b/llvm/lib/Target/ARM/ARMInstrInfo.td
--- a/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -6387,7 +6387,7 @@
        (RSBri GPR:$Rd, GPR:$Rm, 0, pred:$p, cc_out:$s)>;
 // Pre-v6, 'mov r0, r0' was used as a NOP encoding.
-def : InstAlias<"nop${p}", (MOVr R0, R0, pred:$p, zero_reg)>,
+def : InstAlias<"nop${p}", (MOVr R0, R0, pred:$p, zero_reg), 0>,
       Requires<[IsARM, NoV6]>;
 // MUL/UMLAL/SMLAL/UMULL/SMULL are available on all arches, but
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -893,6 +893,7 @@
 defm VRSUB_V : VALU_IV_X_I<"vrsub", 0b000011>;
 def : InstAlias<"vneg.v $vd, $vs$vm", (VRSUB_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
+def : InstAlias<"vneg.v $vd, $vs", (VRSUB_VX VR:$vd, VR:$vs, X0, zero_reg)>;
 // Vector Widening Integer Add/Subtract
 // Refer to 11.2 Widening Vector Arithmetic Instructions
@@ -920,8 +921,12 @@
 def : InstAlias<"vwcvt.x.x.v $vd, $vs$vm", (VWADD_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
+def : InstAlias<"vwcvt.x.x.v $vd, $vs",
+                (VWADD_VX VR:$vd, VR:$vs, X0, zero_reg)>;
 def : InstAlias<"vwcvtu.x.x.v $vd, $vs$vm", (VWADDU_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
+def : InstAlias<"vwcvtu.x.x.v $vd, $vs",
+                (VWADDU_VX VR:$vd, VR:$vs, X0, zero_reg)>;
 // Vector Integer Extension
 defm VZEXT_VF8 : VALU_MV_VS2<"vzext.vf8", 0b010010, 0b00010>;
@@ -950,6 +955,8 @@
 def : InstAlias<"vnot.v $vd, $vs$vm", (VXOR_VI VR:$vd, VR:$vs, -1, VMaskOp:$vm)>;
+def : InstAlias<"vnot.v $vd, $vs",
+                (VXOR_VI VR:$vd, VR:$vs, -1, zero_reg)>;
 // Vector Single-Width Bit Shift Instructions
 defm VSLL_V : VSHT_IV_V_X_I<"vsll", 0b100101, uimm5>;
@@ -968,6 +975,8 @@
 def : InstAlias<"vncvt.x.x.w $vd, $vs$vm", (VNSRL_WX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
+def : InstAlias<"vncvt.x.x.w $vd, $vs",
+                (VNSRL_WX VR:$vd, VR:$vs, X0, zero_reg)>;
 // Vector Integer Comparison Instructions
 let RVVConstraint = NoConstraint in {
@@ -1186,8 +1195,12 @@
 def : InstAlias<"vfneg.v $vd, $vs$vm", (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
+def : InstAlias<"vfneg.v $vd, $vs",
+                (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>;
 def : InstAlias<"vfabs.v $vd, $vs$vm", (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
+def : InstAlias<"vfabs.v $vd, $vs",
+                (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>;
 // Vector Floating-Point Compare Instructions
 let RVVConstraint = NoConstraint in {
diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
--- a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
@@ -21,7 +21,7 @@
 ; RV32-NEXT: lui a0, 1048568
 ; RV32-NEXT: vand.vx v8, v8, a0
 ; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: fixedlen:
@@ -32,7 +32,7 @@
 ; RV64-NEXT: slli a0, a0, 3
 ; RV64-NEXT: vand.vx v8, v8, a0
 ; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
 ; RV64-NEXT: ret
 %v41 = insertelement <2 x i32> poison, i32 16, i32 0
 %v42 = shufflevector <2 x i32> %v41, <2 x i32> poison, <2 x i32> zeroinitializer
@@ -51,7 +51,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vsrl.vi v8, v8, 16
 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: lui a0, 1048568
 ; CHECK-NEXT: vand.vx v8, v8, a0
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
@@ -14,7 +14,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 4
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
 ; RV32I-NEXT: vand.vx v9, v9, a0
@@ -38,7 +38,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 4
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
 ; RV64I-NEXT: vand.vx v9, v9, a0
@@ -60,9 +60,9 @@
 ; RV32D-NEXT: vfcvt.f.xu.v v9, v9
 ; RV32D-NEXT: vsrl.vi v9, v9, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v9, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v9
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v9, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v9
 ; RV32D-NEXT: li a0, 134
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
 ; RV32D-NEXT: vrsub.vx v8, v9, a0
@@ -76,9 +76,9 @@
 ; RV64D-NEXT: vfcvt.f.xu.v v9, v9
 ; RV64D-NEXT: vsrl.vi v9, v9, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT: vnsrl.wx v9, v9, zero
+; RV64D-NEXT: vncvt.x.x.w v9, v9
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV64D-NEXT: vnsrl.wx v9, v9, zero
+; RV64D-NEXT: vncvt.x.x.w v9, v9
 ; RV64D-NEXT: li a0, 134
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
 ; RV64D-NEXT: vrsub.vx v8, v9, a0
@@ -99,7 +99,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 4
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
 ; RV32I-NEXT: vand.vx v9, v9, a0
@@ -123,7 +123,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 4
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
 ; RV64I-NEXT: vand.vx v9, v9, a0
@@ -145,9 +145,9 @@
 ; RV32D-NEXT: vfcvt.f.xu.v v9, v9
 ; RV32D-NEXT: vsrl.vi v9, v9, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v9, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v9
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v9, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v9
 ; RV32D-NEXT: li a0, 134
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
 ; RV32D-NEXT: vrsub.vx v8, v9, a0
@@ -161,9 +161,9 @@
 ; RV64D-NEXT: vfcvt.f.xu.v v9, v9
 ; RV64D-NEXT: vsrl.vi v9, v9, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v9, v9, zero
+; RV64D-NEXT: vncvt.x.x.w v9, v9
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV64D-NEXT: vnsrl.wx v9, v9, zero
+; RV64D-NEXT: vncvt.x.x.w v9, v9
 ; RV64D-NEXT: li a0, 134
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
 ; RV64D-NEXT: vrsub.vx v8, v9, a0
@@ -184,7 +184,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 4
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
 ; RV32I-NEXT: vand.vx v9, v9, a0
@@ -208,7 +208,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 4
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
 ; RV64I-NEXT: vand.vx v9, v9, a0
@@ -230,9 +230,9 @@
 ; RV32D-NEXT: vfcvt.f.xu.v v10, v10
 ; RV32D-NEXT: vsrl.vi v10, v10, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v10, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v10
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v9, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v9
 ; RV32D-NEXT: li a0, 134
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
 ; RV32D-NEXT: vrsub.vx v8, v9, a0
@@ -246,9 +246,9 @@
 ; RV64D-NEXT: vfcvt.f.xu.v v10, v10
 ; RV64D-NEXT: vsrl.vi v10, v10, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v9, v10, zero
+; RV64D-NEXT: vncvt.x.x.w v9, v10
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v9, v9, zero
+; RV64D-NEXT: vncvt.x.x.w v9, v9
 ; RV64D-NEXT: li a0, 134
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
 ; RV64D-NEXT: vrsub.vx v8, v9, a0
@@ -269,7 +269,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 4
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
 ; RV32I-NEXT: vand.vx v9, v9, a0
@@ -293,7 +293,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 4
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
 ; RV64I-NEXT: vand.vx v9, v9, a0
@@ -315,9 +315,9 @@
 ; RV32D-NEXT: vfcvt.f.xu.v v12, v12
 ; RV32D-NEXT: vsrl.vi v12, v12, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v10, v12, zero
+; RV32D-NEXT: vncvt.x.x.w v10, v12
 ; RV32D-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v10, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v10
 ; RV32D-NEXT: li a0, 134
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
 ; RV32D-NEXT: vrsub.vx v8, v9, a0
@@ -331,9 +331,9 @@
 ; RV64D-NEXT: vfcvt.f.xu.v v12, v12
 ; RV64D-NEXT: vsrl.vi v12, v12, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v10, v12, zero
+; RV64D-NEXT: vncvt.x.x.w v10, v12
 ; RV64D-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v9, v10, zero
+; RV64D-NEXT: vncvt.x.x.w v9, v10
 ; RV64D-NEXT: li a0, 134
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
 ; RV64D-NEXT: vrsub.vx v8, v9, a0
@@ -354,7 +354,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v10
 ; RV32I-NEXT: vsrl.vi v10, v8, 4
 ; RV32I-NEXT: vor.vv v8, v8, v10
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v10, v8, 1
 ; RV32I-NEXT: li a0, 85
 ; RV32I-NEXT: vand.vx v10, v10, a0
@@ -378,7 +378,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v10
 ; RV64I-NEXT: vsrl.vi v10, v8, 4
 ; RV64I-NEXT: vor.vv v8, v8, v10
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v10, v8, 1
 ; RV64I-NEXT: li a0, 85
 ; RV64I-NEXT: vand.vx v10, v10, a0
@@ -400,9 +400,9 @@
 ; RV32D-NEXT: vfcvt.f.xu.v v16, v16
 ; RV32D-NEXT: vsrl.vi v16, v16, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT: vnsrl.wx v12, v16, zero
+; RV32D-NEXT: vncvt.x.x.w v12, v16
 ; RV32D-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v10, v12, zero
+; RV32D-NEXT: vncvt.x.x.w v10, v12
 ; RV32D-NEXT: li a0, 134
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
 ; RV32D-NEXT: vrsub.vx v8, v10, a0
@@ -416,9 +416,9 @@
 ; RV64D-NEXT: vfcvt.f.xu.v v16, v16
 ; RV64D-NEXT: vsrl.vi v16, v16, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT: vnsrl.wx v12, v16, zero
+; RV64D-NEXT: vncvt.x.x.w v12, v16
 ; RV64D-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v10, v12, zero
+; RV64D-NEXT: vncvt.x.x.w v10, v12
 ; RV64D-NEXT: li a0, 134
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
 ; RV64D-NEXT: vrsub.vx v8, v10, a0
@@ -439,7 +439,7 @@
 ; CHECK-NEXT: vor.vv v8, v8, v12
 ; CHECK-NEXT: vsrl.vi v12, v8, 4
 ; CHECK-NEXT: vor.vv v8, v8, v12
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: vsrl.vi v12, v8, 1
 ; CHECK-NEXT: li a0, 85
 ; CHECK-NEXT: vand.vx v12, v12, a0
@@ -468,7 +468,7 @@
 ; CHECK-NEXT: vor.vv v8, v8, v16
 ; CHECK-NEXT: vsrl.vi v16, v8, 4
 ; CHECK-NEXT: vor.vv v8, v8, v16
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: vsrl.vi v16, v8, 1
 ; CHECK-NEXT: li a0, 85
 ; CHECK-NEXT: vand.vx v16, v16, a0
@@ -499,7 +499,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 8
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 5
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -532,7 +532,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 8
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 5
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -561,7 +561,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; RV32D-NEXT: vsrl.vi v9, v9, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v9, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v9
 ; RV32D-NEXT: li a0, 142
 ; RV32D-NEXT: vrsub.vx v9, v9, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -576,7 +576,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64D-NEXT: vsrl.vi v9, v9, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT: vnsrl.wx v9, v9, zero
+; RV64D-NEXT: vncvt.x.x.w v9, v9
 ; RV64D-NEXT: li a0, 142
 ; RV64D-NEXT: vrsub.vx v9, v9, a0
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
@@ -600,7 +600,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 8
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 5
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -633,7 +633,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 8
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 5
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -662,7 +662,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; RV32D-NEXT: vsrl.vi v9, v9, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v9, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v9
 ; RV32D-NEXT: li a0, 142
 ; RV32D-NEXT: vrsub.vx v9, v9, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -677,7 +677,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; RV64D-NEXT: vsrl.vi v9, v9, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v9, v9, zero
+; RV64D-NEXT: vncvt.x.x.w v9, v9
 ; RV64D-NEXT: li a0, 142
 ; RV64D-NEXT: vrsub.vx v9, v9, a0
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
@@ -701,7 +701,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 8
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 5
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -734,7 +734,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 8
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 5
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -763,7 +763,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; RV32D-NEXT: vsrl.vi v10, v10, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v10, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v10
 ; RV32D-NEXT: li a0, 142
 ; RV32D-NEXT: vrsub.vx v9, v9, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -778,7 +778,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; RV64D-NEXT: vsrl.vi v10, v10, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v9, v10, zero
+; RV64D-NEXT: vncvt.x.x.w v9, v10
 ; RV64D-NEXT: li a0, 142
 ; RV64D-NEXT: vrsub.vx v9, v9, a0
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
@@ -802,7 +802,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v10
 ; RV32I-NEXT: vsrl.vi v10, v8, 8
 ; RV32I-NEXT: vor.vv v8, v8, v10
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v10, v8, 1
 ; RV32I-NEXT: lui a0, 5
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -835,7 +835,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v10
 ; RV64I-NEXT: vsrl.vi v10, v8, 8
 ; RV64I-NEXT: vor.vv v8, v8, v10
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v10, v8, 1
 ; RV64I-NEXT: lui a0, 5
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -864,7 +864,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; RV32D-NEXT: vsrl.vi v12, v12, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v10, v12, zero
+; RV32D-NEXT: vncvt.x.x.w v10, v12
 ; RV32D-NEXT: li a0, 142
 ; RV32D-NEXT: vrsub.vx v10, v10, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -879,7 +879,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; RV64D-NEXT: vsrl.vi v12, v12, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v10, v12, zero
+; RV64D-NEXT: vncvt.x.x.w v10, v12
 ; RV64D-NEXT: li a0, 142
 ; RV64D-NEXT: vrsub.vx v10, v10, a0
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
@@ -903,7 +903,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v12
 ; RV32I-NEXT: vsrl.vi v12, v8, 8
 ; RV32I-NEXT: vor.vv v8, v8, v12
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v12, v8, 1
 ; RV32I-NEXT: lui a0, 5
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -936,7 +936,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v12
 ; RV64I-NEXT: vsrl.vi v12, v8, 8
 ; RV64I-NEXT: vor.vv v8, v8, v12
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v12, v8, 1
 ; RV64I-NEXT: lui a0, 5
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -965,7 +965,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e32, m8, ta, mu
 ; RV32D-NEXT: vsrl.vi v16, v16, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT: vnsrl.wx v12, v16, zero
+; RV32D-NEXT: vncvt.x.x.w v12, v16
 ; RV32D-NEXT: li a0, 142
 ; RV32D-NEXT: vrsub.vx v12, v12, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -980,7 +980,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e32, m8, ta, mu
 ; RV64D-NEXT: vsrl.vi v16, v16, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT: vnsrl.wx v12, v16, zero
+; RV64D-NEXT: vncvt.x.x.w v12, v16
 ; RV64D-NEXT: li a0, 142
 ; RV64D-NEXT: vrsub.vx v12, v12, a0
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
@@ -1004,7 +1004,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v16
 ; RV32-NEXT: vsrl.vi v16, v8, 8
 ; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: vsrl.vi v16, v8, 1
 ; RV32-NEXT: lui a0, 5
 ; RV32-NEXT: addi a0, a0, 1365
@@ -1037,7 +1037,7 @@
 ; RV64-NEXT: vor.vv v8, v8, v16
 ; RV64-NEXT: vsrl.vi v16, v8, 8
 ; RV64-NEXT: vor.vv v8, v8, v16
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vsrl.vi v16, v8, 1
 ; RV64-NEXT: lui a0, 5
 ; RV64-NEXT: addiw a0, a0, 1365
@@ -1077,7 +1077,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 16
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 349525
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -1113,7 +1113,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 16
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 349525
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -1144,7 +1144,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; RV32D-NEXT: vsrl.vx v9, v9, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v9, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v9
 ; RV32D-NEXT: li a0, 1054
 ; RV32D-NEXT: vrsub.vx v9, v9, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -1160,7 +1160,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; RV64D-NEXT: vsrl.vx v9, v9, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v9, v9, zero
+; RV64D-NEXT: vncvt.x.x.w v9, v9
 ; RV64D-NEXT: li a0, 1054
 ; RV64D-NEXT: vrsub.vx v9, v9, a0
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
@@ -1186,7 +1186,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 16
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 349525
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -1222,7 +1222,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 16
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 349525
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -1253,7 +1253,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; RV32D-NEXT: vsrl.vx v10, v10, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v10, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v10
 ; RV32D-NEXT: li a0, 1054
 ; RV32D-NEXT: vrsub.vx v9, v9, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -1269,7 +1269,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; RV64D-NEXT: vsrl.vx v10, v10, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v9, v10, zero
+; RV64D-NEXT: vncvt.x.x.w v9, v10
 ; RV64D-NEXT: li a0, 1054
 ; RV64D-NEXT: vrsub.vx v9, v9, a0
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
@@ -1295,7 +1295,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v10
 ; RV32I-NEXT: vsrl.vi v10, v8, 16
 ; RV32I-NEXT: vor.vv v8, v8, v10
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v10, v8, 1
 ; RV32I-NEXT: lui a0, 349525
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -1331,7 +1331,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v10
 ; RV64I-NEXT: vsrl.vi v10, v8, 16
 ; RV64I-NEXT: vor.vv v8, v8, v10
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v10, v8, 1
 ; RV64I-NEXT: lui a0, 349525
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -1362,7 +1362,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; RV32D-NEXT: vsrl.vx v12, v12, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v10, v12, zero
+; RV32D-NEXT: vncvt.x.x.w v10, v12
 ; RV32D-NEXT: li a0, 1054
 ; RV32D-NEXT: vrsub.vx v10, v10, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -1378,7 +1378,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; RV64D-NEXT: vsrl.vx v12, v12, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v10, v12, zero
+; RV64D-NEXT: vncvt.x.x.w v10, v12
 ; RV64D-NEXT: li a0, 1054
 ; RV64D-NEXT: vrsub.vx v10, v10, a0
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
@@ -1404,7 +1404,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v12
 ; RV32I-NEXT: vsrl.vi v12, v8, 16
 ; RV32I-NEXT: vor.vv v8, v8, v12
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v12, v8, 1
 ; RV32I-NEXT: lui a0, 349525
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -1440,7 +1440,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v12
 ; RV64I-NEXT: vsrl.vi v12, v8, 16
 ; RV64I-NEXT: vor.vv v8, v8, v12
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v12, v8, 1
 ; RV64I-NEXT: lui a0, 349525
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -1471,7 +1471,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32D-NEXT: vsrl.vx v16, v16, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT: vnsrl.wx v12, v16, zero
+; RV32D-NEXT: vncvt.x.x.w v12, v16
 ; RV32D-NEXT: li a0, 1054
 ; RV32D-NEXT: vrsub.vx v12, v12, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -1487,7 +1487,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV64D-NEXT: vsrl.vx v16, v16, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT: vnsrl.wx v12, v16, zero
+; RV64D-NEXT: vncvt.x.x.w v12, v16
 ; RV64D-NEXT: li a0, 1054
 ; RV64D-NEXT: vrsub.vx v12, v12, a0
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
@@ -1513,7 +1513,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v16
 ; RV32-NEXT: vsrl.vi v16, v8, 16
 ; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: vsrl.vi v16, v8, 1
 ; RV32-NEXT: lui a0, 349525
 ; RV32-NEXT: addi a0, a0, 1365
@@ -1549,7 +1549,7 @@
 ; RV64-NEXT: vor.vv v8, v8, v16
 ; RV64-NEXT: vsrl.vi v16, v8, 16
 ; RV64-NEXT: vor.vv v8, v8, v16
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vsrl.vi v16, v8, 1
 ; RV64-NEXT: lui a0, 349525
 ; RV64-NEXT: addiw a0, a0, 1365
@@ -1613,7 +1613,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v9
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vsrl.vi v11, v8, 1
@@ -1652,7 +1652,7 @@
 ; RV64-NEXT: li a0, 32
 ; RV64-NEXT: vsrl.vx v9, v8, a0
 ; RV64-NEXT: vor.vv v8, v8, v9
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: lui a0, %hi(.LCPI18_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI18_0)(a0)
 ; RV64-NEXT: lui a1, %hi(.LCPI18_1)
@@ -1717,7 +1717,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v10
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vsrl.vi v14, v8, 1
@@ -1756,7 +1756,7 @@
 ; RV64-NEXT: li a0, 32
 ; RV64-NEXT: vsrl.vx v10, v8, a0
 ; RV64-NEXT: vor.vv v8, v8, v10
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: lui a0, %hi(.LCPI19_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI19_0)(a0)
 ; RV64-NEXT: lui a1, %hi(.LCPI19_1)
@@ -1821,7 +1821,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v12
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vsrl.vi v20, v8, 1
@@ -1860,7 +1860,7 @@
 ; RV64-NEXT: li a0, 32
 ; RV64-NEXT: vsrl.vx v12, v8, a0
 ; RV64-NEXT: vor.vv v8, v8, v12
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: lui a0, %hi(.LCPI20_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI20_0)(a0)
 ; RV64-NEXT: lui a1, %hi(.LCPI20_1)
@@ -1925,7 +1925,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v16
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v24, (a0), zero
 ; RV32-NEXT: vsrl.vi v0, v8, 1
@@ -1964,7 +1964,7 @@
 ; RV64-NEXT: li a0, 32
 ; RV64-NEXT: vsrl.vx v16, v8, a0
 ; RV64-NEXT: vor.vv v8, v8, v16
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: lui a0, %hi(.LCPI21_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI21_0)(a0)
 ; RV64-NEXT: lui a1, %hi(.LCPI21_1)
@@ -2002,7 +2002,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 4
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
 ; RV32I-NEXT: vand.vx v9, v9, a0
@@ -2026,7 +2026,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 4
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
 ; RV64I-NEXT: vand.vx v9, v9, a0
@@ -2048,9 +2048,9 @@
 ; RV32D-NEXT: vfcvt.f.xu.v v8, v9
 ; RV32D-NEXT: vsrl.vi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 134
 ; RV32D-NEXT: vrsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -2062,9 +2062,9 @@
 ; RV64D-NEXT: vfcvt.f.xu.v v8, v9
 ; RV64D-NEXT: vsrl.vi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 134
 ; RV64D-NEXT: vrsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -2082,7 +2082,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 4
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
 ; RV32I-NEXT: vand.vx v9, v9, a0
@@ -2106,7 +2106,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 4
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
 ; RV64I-NEXT: vand.vx v9, v9, a0
@@ -2128,9 +2128,9 @@
 ; RV32D-NEXT: vfcvt.f.xu.v v8, v9
 ; RV32D-NEXT: vsrl.vi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 134
 ; RV32D-NEXT: vrsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -2142,9 +2142,9 @@
 ; RV64D-NEXT: vfcvt.f.xu.v v8, v9
 ; RV64D-NEXT: vsrl.vi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 134
 ; RV64D-NEXT: vrsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -2162,7 +2162,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 4
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
 ; RV32I-NEXT: vand.vx v9, v9, a0
@@ -2186,7 +2186,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 4
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
 ; RV64I-NEXT: vand.vx v9, v9, a0
@@ -2208,9 +2208,9 @@
 ; RV32D-NEXT: vfcvt.f.xu.v v8, v10
 ; RV32D-NEXT: vsrl.vi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v10, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v10, v8
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v10, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v10
 ; RV32D-NEXT: li a0, 134
 ; RV32D-NEXT: vrsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -2222,9 +2222,9 @@
 ; RV64D-NEXT: vfcvt.f.xu.v v8, v10
 ; RV64D-NEXT: vsrl.vi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v10, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v10, v8
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v10, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v10
 ; RV64D-NEXT: li a0, 134
 ; RV64D-NEXT: vrsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -2242,7 +2242,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 4
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
 ; RV32I-NEXT: vand.vx v9, v9, a0
@@ -2266,7 +2266,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 4
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
 ; RV64I-NEXT: vand.vx v9, v9, a0
@@ -2288,9 +2288,9 @@
 ; RV32D-NEXT: vfcvt.f.xu.v v8, v12
 ; RV32D-NEXT: vsrl.vi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v12, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v12, v8
 ; RV32D-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v12, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v12
 ; RV32D-NEXT: li a0, 134
 ; RV32D-NEXT: vrsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -2302,9 +2302,9 @@
 ; RV64D-NEXT: vfcvt.f.xu.v v8, v12
 ; RV64D-NEXT: vsrl.vi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v12, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v12, v8
 ; RV64D-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v12, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v12
 ; RV64D-NEXT: li a0, 134
 ; RV64D-NEXT: vrsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -2322,7 +2322,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v10
 ; RV32I-NEXT: vsrl.vi v10, v8, 4
 ; RV32I-NEXT: vor.vv v8, v8, v10
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v10, v8, 1
 ; RV32I-NEXT: li a0, 85
 ; RV32I-NEXT: vand.vx v10, v10, a0
@@ -2346,7 +2346,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v10
 ; RV64I-NEXT: vsrl.vi v10, v8, 4
 ; RV64I-NEXT: vor.vv v8, v8, v10
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v10, v8, 1
 ; RV64I-NEXT: li a0, 85
 ; RV64I-NEXT: vand.vx v10, v10, a0
@@ -2368,9 +2368,9 @@
 ; RV32D-NEXT: vfcvt.f.xu.v v8, v16
 ; RV32D-NEXT: vsrl.vi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT: vnsrl.wx v16, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v16, v8
 ; RV32D-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v16, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v16
 ; RV32D-NEXT: li a0, 134
 ; RV32D-NEXT: vrsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -2382,9 +2382,9 @@
 ; RV64D-NEXT: vfcvt.f.xu.v v8, v16
 ; RV64D-NEXT: vsrl.vi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT: vnsrl.wx v16, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v16, v8
 ; RV64D-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v16, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v16
 ; RV64D-NEXT: li a0, 134
 ; RV64D-NEXT: vrsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -2402,7 +2402,7 @@
 ; CHECK-NEXT: vor.vv v8, v8, v12
 ; CHECK-NEXT: vsrl.vi v12, v8, 4
 ; CHECK-NEXT: vor.vv v8, v8, v12
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: vsrl.vi v12, v8, 1
 ; CHECK-NEXT: li a0, 85
 ; CHECK-NEXT: vand.vx v12, v12, a0
@@ -2430,7 +2430,7 @@
 ; CHECK-NEXT: vor.vv v8, v8, v16
 ; CHECK-NEXT: vsrl.vi v16, v8, 4
 ; CHECK-NEXT: vor.vv v8, v8, v16
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: vsrl.vi v16, v8, 1
 ; CHECK-NEXT: li a0, 85
 ; CHECK-NEXT: vand.vx v16, v16, a0
@@ -2460,7 +2460,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 8
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 5
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -2493,7 +2493,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 8
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 5
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -2522,7 +2522,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; RV32D-NEXT: vsrl.vi v8, v9, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 142
 ; RV32D-NEXT: vrsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -2534,7 +2534,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64D-NEXT: vsrl.vi v8, v9, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 142
 ; RV64D-NEXT: vrsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -2554,7 +2554,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 8
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 5
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -2587,7 +2587,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 8
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 5
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -2616,7 +2616,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; RV32D-NEXT: vsrl.vi v8, v9, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 142
 ; RV32D-NEXT: vrsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -2628,7 +2628,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; RV64D-NEXT: vsrl.vi v8, v9, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 142
 ; RV64D-NEXT: vrsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -2648,7 +2648,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 8
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 5
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -2681,7 +2681,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 8
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 5
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -2710,7 +2710,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; RV32D-NEXT: vsrl.vi v8, v10, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v10, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v10, v8
 ; RV32D-NEXT: li a0, 142
 ; RV32D-NEXT: vrsub.vx v8, v10, a0
 ; RV32D-NEXT: ret
@@ -2722,7 +2722,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; RV64D-NEXT: vsrl.vi v8, v10, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v10, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v10, v8
 ; RV64D-NEXT: li a0, 142
 ; RV64D-NEXT: vrsub.vx v8, v10, a0
 ; RV64D-NEXT: ret
@@ -2742,7 +2742,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v10
 ; RV32I-NEXT: vsrl.vi v10, v8, 8
 ; RV32I-NEXT: vor.vv v8, v8, v10
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v10, v8, 1
 ; RV32I-NEXT: lui a0, 5
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -2775,7 +2775,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v10
 ; RV64I-NEXT: vsrl.vi v10, v8, 8
 ; RV64I-NEXT: vor.vv v8, v8, v10
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v10, v8, 1
 ; RV64I-NEXT: lui a0, 5
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -2804,7 +2804,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; RV32D-NEXT: vsrl.vi v8, v12, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v12, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v12, v8
 ; RV32D-NEXT: li a0, 142
 ; RV32D-NEXT: vrsub.vx v8, v12, a0
 ; RV32D-NEXT: ret
@@ -2816,7 +2816,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; RV64D-NEXT: vsrl.vi v8, v12, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v12, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v12, v8
 ; RV64D-NEXT: li a0, 142
 ; RV64D-NEXT: vrsub.vx v8, v12, a0
 ; RV64D-NEXT: ret
@@ -2836,7 +2836,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v12
 ; RV32I-NEXT: vsrl.vi v12, v8, 8
 ; RV32I-NEXT: vor.vv v8, v8, v12
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v12, v8, 1
 ; RV32I-NEXT: lui a0, 5
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -2869,7 +2869,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v12
 ; RV64I-NEXT: vsrl.vi v12, v8, 8
 ; RV64I-NEXT: vor.vv v8, v8, v12
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v12, v8, 1
 ; RV64I-NEXT: lui a0, 5
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -2898,7 +2898,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e32, m8, ta, mu
 ; RV32D-NEXT: vsrl.vi v8, v16, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT: vnsrl.wx v16, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v16, v8
 ; RV32D-NEXT: li a0, 142
 ; RV32D-NEXT: vrsub.vx v8, v16, a0
 ; RV32D-NEXT: ret
@@ -2910,7 +2910,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e32, m8, ta, mu
 ; RV64D-NEXT: vsrl.vi v8, v16, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT: vnsrl.wx v16, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v16, v8
 ; RV64D-NEXT: li a0, 142
 ; RV64D-NEXT: vrsub.vx v8, v16, a0
 ; RV64D-NEXT: ret
@@ -2930,7 +2930,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v16
 ; RV32-NEXT: vsrl.vi v16, v8, 8
 ; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: vsrl.vi v16, v8, 1
 ; RV32-NEXT: lui a0, 5
 ; RV32-NEXT: addi a0, a0, 1365
@@ -2963,7 +2963,7 @@
 ; RV64-NEXT: vor.vv v8, v8, v16
 ; RV64-NEXT: vsrl.vi v16, v8, 8
 ; RV64-NEXT: vor.vv v8, v8, v16
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vsrl.vi v16, v8, 1
 ; RV64-NEXT: lui a0, 5
 ; RV64-NEXT: addiw a0, a0, 1365
@@ -3002,7 +3002,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 16
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 349525
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -3038,7 +3038,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 16
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 349525
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -3069,7 +3069,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; RV32D-NEXT: vsrl.vx v8, v9, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 1054
 ; RV32D-NEXT: vrsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -3082,7 +3082,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; RV64D-NEXT: vsrl.vx v8, v9, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 1054
 ; RV64D-NEXT: vrsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -3104,7 +3104,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 16
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 349525
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -3140,7 +3140,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 16
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 349525
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -3171,7 +3171,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; RV32D-NEXT: vsrl.vx v8, v10, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v10, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v10, v8
 ; RV32D-NEXT: li a0, 1054
 ; RV32D-NEXT: vrsub.vx v8, v10, a0
 ; RV32D-NEXT: ret
@@ -3184,7 +3184,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; RV64D-NEXT: vsrl.vx v8, v10, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v10, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v10, v8
 ; RV64D-NEXT: li a0, 1054
 ; RV64D-NEXT: vrsub.vx v8, v10, a0
 ; RV64D-NEXT: ret
@@ -3206,7 +3206,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v10
 ; RV32I-NEXT: vsrl.vi v10, v8, 16
 ; RV32I-NEXT: vor.vv v8, v8, v10
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v10, v8, 1
 ; RV32I-NEXT: lui a0, 349525
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -3242,7 +3242,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v10
 ; RV64I-NEXT: vsrl.vi v10, v8, 16
 ; RV64I-NEXT: vor.vv v8, v8, v10
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v10, v8, 1
 ; RV64I-NEXT: lui a0, 349525
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -3273,7 +3273,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; RV32D-NEXT: vsrl.vx v8, v12, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v12, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v12, v8
 ; RV32D-NEXT: li a0, 1054
 ; RV32D-NEXT: vrsub.vx v8, v12, a0
 ; RV32D-NEXT: ret
@@ -3286,7 +3286,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; RV64D-NEXT: vsrl.vx v8, v12, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v12, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v12, v8
 ; RV64D-NEXT: li a0, 1054
 ; RV64D-NEXT: vrsub.vx v8, v12, a0
 ; RV64D-NEXT: ret
@@ -3308,7 +3308,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v12
 ; RV32I-NEXT: vsrl.vi v12, v8, 16
 ; RV32I-NEXT: vor.vv v8, v8, v12
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v12, v8, 1
 ; RV32I-NEXT: lui a0, 349525
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -3344,7 +3344,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v12
 ; RV64I-NEXT: vsrl.vi v12, v8, 16
 ; RV64I-NEXT: vor.vv v8, v8, v12
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v12, v8, 1
 ; RV64I-NEXT: lui a0, 349525
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -3375,7 +3375,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32D-NEXT: vsrl.vx v8, v16, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT: vnsrl.wx v16, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v16, v8
 ; RV32D-NEXT: li a0, 1054
 ; RV32D-NEXT: vrsub.vx v8, v16, a0
 ; RV32D-NEXT: ret
@@ -3388,7 +3388,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV64D-NEXT: vsrl.vx v8, v16, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT: vnsrl.wx v16, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v16, v8
 ; RV64D-NEXT: li a0, 1054
 ; RV64D-NEXT: vrsub.vx v8, v16, a0
 ; RV64D-NEXT: ret
@@ -3410,7 +3410,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v16
 ; RV32-NEXT: vsrl.vi v16, v8, 16
 ; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: vsrl.vi v16, v8, 1
 ; RV32-NEXT: lui a0, 349525
 ; RV32-NEXT: addi a0, a0, 1365
@@ -3446,7 +3446,7 @@
 ; RV64-NEXT: vor.vv v8, v8, v16
 ; RV64-NEXT: vsrl.vi v16, v8, 16
 ; RV64-NEXT: vor.vv v8, v8, v16
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vsrl.vi v16, v8, 1
 ; RV64-NEXT: lui a0, 349525
 ; RV64-NEXT: addiw a0, a0, 1365
@@ -3509,7 +3509,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v9
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vsrl.vi v11, v8, 1
@@ -3548,7 +3548,7 @@
 ; RV64-NEXT: li a0, 32
 ; RV64-NEXT: vsrl.vx v9, v8, a0
 ; RV64-NEXT: vor.vv v8, v8, v9
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: lui a0, %hi(.LCPI40_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI40_0)(a0)
 ; RV64-NEXT: lui a1, %hi(.LCPI40_1)
@@ -3612,7 +3612,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v10
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vsrl.vi v14, v8, 1
@@ -3651,7 +3651,7 @@
 ; RV64-NEXT: li a0, 32
 ; RV64-NEXT: vsrl.vx v10, v8, a0
 ; RV64-NEXT: vor.vv v8, v8, v10
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: lui a0, %hi(.LCPI41_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI41_0)(a0)
 ; RV64-NEXT: lui a1, %hi(.LCPI41_1)
@@ -3715,7 +3715,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v12
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vsrl.vi v20, v8, 1
@@ -3754,7 +3754,7 @@
 ; RV64-NEXT: li a0, 32
 ; RV64-NEXT: vsrl.vx v12, v8, a0
 ; RV64-NEXT: vor.vv v8, v8, v12
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: lui a0, %hi(.LCPI42_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI42_0)(a0)
 ; RV64-NEXT: lui a1, %hi(.LCPI42_1)
@@ -3818,7 +3818,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v16
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v24, (a0), zero
 ; RV32-NEXT: vsrl.vi v0, v8, 1
@@ -3857,7 +3857,7 @@
 ; RV64-NEXT: li a0, 32
 ; RV64-NEXT: vsrl.vx v16, v8, a0
 ; RV64-NEXT: vor.vv v8, v8, v16
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: lui a0, %hi(.LCPI43_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI43_0)(a0)
 ; RV64-NEXT: lui a1, %hi(.LCPI43_1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
@@ -10,7 +10,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
@@ -31,7 +31,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
@@ -59,9 +59,9 @@
 ; RV32D-NEXT: vfcvt.f.xu.v v8, v9
 ; RV32D-NEXT: vsrl.vi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: vmerge.vim v8, v8, 8, v0
@@ -79,9 +79,9 @@
 ; RV64D-NEXT: vfcvt.f.xu.v v8, v9
 ; RV64D-NEXT: vsrl.vi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: vmerge.vim v8, v8, 8, v0
@@ -97,7 +97,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
@@ -118,7 +118,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
@@ -146,9 +146,9 @@
 ; RV32D-NEXT: vfcvt.f.xu.v v8, v9
 ; RV32D-NEXT: vsrl.vi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: vmerge.vim v8, v8, 8, v0
@@ -166,9 +166,9 @@
 ; RV64D-NEXT: vfcvt.f.xu.v v8, v9
 ; RV64D-NEXT: vsrl.vi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: vmerge.vim v8, v8, 8, v0
@@ -184,7 +184,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
@@ -205,7 +205,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
@@ -233,9 +233,9 @@
 ; RV32D-NEXT: vfcvt.f.xu.v v8, v10
 ; RV32D-NEXT: vsrl.vi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v10, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v10, v8
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v10, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v10
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: vmerge.vim v8, v8, 8, v0
@@ -253,9 +253,9 @@
 ; RV64D-NEXT: vfcvt.f.xu.v v8, v10
 ; RV64D-NEXT: vsrl.vi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v10, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v10, v8
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v10, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v10
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: vmerge.vim v8, v8, 8, v0
@@ -271,7 +271,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e8, m1, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
@@ -292,7 +292,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e8, m1, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
@@ -320,9 +320,9 @@
 ; RV32D-NEXT: vfcvt.f.xu.v v8, v12
 ; RV32D-NEXT: vsrl.vi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v12, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v12, v8
 ; RV32D-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v12, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v12
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: vmerge.vim v8, v8, 8, v0
@@ -340,9 +340,9 @@
 ; RV64D-NEXT: vfcvt.f.xu.v v8, v12
 ; RV64D-NEXT: vsrl.vi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v12, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v12, v8
 ; RV64D-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v12, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v12
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: vmerge.vim v8, v8, 8, v0
@@ -358,7 +358,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e8, m2, ta, mu
 ; RV32I-NEXT: vsub.vx v10, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v10
 ; RV32I-NEXT: vsrl.vi v10, v8, 1
 ; RV32I-NEXT: li a0, 85
@@ -379,7 +379,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e8, m2, ta, mu
 ; RV64I-NEXT: vsub.vx v10, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v10
 ; RV64I-NEXT: vsrl.vi v10, v8, 1
 ; RV64I-NEXT: li a0, 85
@@ -407,9 +407,9 @@
 ; RV32D-NEXT: vfcvt.f.xu.v v8, v16
 ; RV32D-NEXT: vsrl.vi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT: vnsrl.wx v16, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v16, v8
 ; RV32D-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v16, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v16
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: vmerge.vim v8, v8, 8, v0
@@ -427,9 +427,9 @@
 ; RV64D-NEXT: vfcvt.f.xu.v v8, v16
 ; RV64D-NEXT: vsrl.vi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT: vnsrl.wx v16, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v16, v8
 ; RV64D-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v16, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v16
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: vmerge.vim v8, v8, 8, v0
@@ -445,7 +445,7 @@
 ; CHECK-NEXT: li a0, 1
 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT: vsub.vx v12, v8, a0
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: vand.vv v8, v8, v12
 ; CHECK-NEXT: vsrl.vi v12, v8, 1
 ; CHECK-NEXT: li a0, 85
@@ -471,7 +471,7 @@
 ; CHECK-NEXT: li a0, 1
 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT: vsub.vx v16, v8, a0
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: vand.vv v8, v8, v16
 ; CHECK-NEXT: vsrl.vi v16, v8, 1
 ; CHECK-NEXT: li a0, 85
@@ -497,7 +497,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 5
@@ -525,7 +525,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 5
@@ -559,7 +559,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; RV32D-NEXT: vsrl.vi v8, v9, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: li a0, 16
@@ -577,7 +577,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64D-NEXT: vsrl.vi v8, v9, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: li a0, 16
@@ -594,7 +594,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 5
@@ -622,7 +622,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 5
@@ -656,7 +656,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; RV32D-NEXT: vsrl.vi v8, v9, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: li a0, 16
@@ -674,7 +674,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; RV64D-NEXT: vsrl.vi v8, v9, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: li a0, 16
@@ -691,7 +691,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e16, m1, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
RV32I-NEXT: vsrl.vi v9, v8, 1 ; RV32I-NEXT: lui a0, 5 @@ -719,7 +719,7 @@ ; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; RV64I-NEXT: vsub.vx v9, v8, a0 -; RV64I-NEXT: vxor.vi v8, v8, -1 +; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v9 ; RV64I-NEXT: vsrl.vi v9, v8, 1 ; RV64I-NEXT: lui a0, 5 @@ -753,7 +753,7 @@ ; RV32D-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV32D-NEXT: vsrl.vi v8, v10, 23 ; RV32D-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; RV32D-NEXT: vnsrl.wx v10, v8, zero +; RV32D-NEXT: vncvt.x.x.w v10, v8 ; RV32D-NEXT: li a0, 127 ; RV32D-NEXT: vsub.vx v8, v10, a0 ; RV32D-NEXT: li a0, 16 @@ -771,7 +771,7 @@ ; RV64D-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64D-NEXT: vsrl.vi v8, v10, 23 ; RV64D-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; RV64D-NEXT: vnsrl.wx v10, v8, zero +; RV64D-NEXT: vncvt.x.x.w v10, v8 ; RV64D-NEXT: li a0, 127 ; RV64D-NEXT: vsub.vx v8, v10, a0 ; RV64D-NEXT: li a0, 16 @@ -788,7 +788,7 @@ ; RV32I-NEXT: li a0, 1 ; RV32I-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; RV32I-NEXT: vsub.vx v10, v8, a0 -; RV32I-NEXT: vxor.vi v8, v8, -1 +; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v10 ; RV32I-NEXT: vsrl.vi v10, v8, 1 ; RV32I-NEXT: lui a0, 5 @@ -816,7 +816,7 @@ ; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; RV64I-NEXT: vsub.vx v10, v8, a0 -; RV64I-NEXT: vxor.vi v8, v8, -1 +; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v10 ; RV64I-NEXT: vsrl.vi v10, v8, 1 ; RV64I-NEXT: lui a0, 5 @@ -850,7 +850,7 @@ ; RV32D-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV32D-NEXT: vsrl.vi v8, v12, 23 ; RV32D-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; RV32D-NEXT: vnsrl.wx v12, v8, zero +; RV32D-NEXT: vncvt.x.x.w v12, v8 ; RV32D-NEXT: li a0, 127 ; RV32D-NEXT: vsub.vx v8, v12, a0 ; RV32D-NEXT: li a0, 16 @@ -868,7 +868,7 @@ ; RV64D-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64D-NEXT: vsrl.vi v8, v12, 23 ; RV64D-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; RV64D-NEXT: vnsrl.wx v12, v8, zero +; RV64D-NEXT: vncvt.x.x.w v12, v8 ; RV64D-NEXT: li a0, 127 ; RV64D-NEXT: vsub.vx v8, v12, a0 ; RV64D-NEXT: li a0, 16 @@ -885,7 +885,7 @@ ; RV32I-NEXT: li a0, 1 ; RV32I-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; RV32I-NEXT: vsub.vx v12, v8, a0 -; RV32I-NEXT: vxor.vi v8, v8, -1 +; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v12 ; RV32I-NEXT: vsrl.vi v12, v8, 1 ; RV32I-NEXT: lui a0, 5 @@ -913,7 +913,7 @@ ; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; RV64I-NEXT: vsub.vx v12, v8, a0 -; RV64I-NEXT: vxor.vi v8, v8, -1 +; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v12 ; RV64I-NEXT: vsrl.vi v12, v8, 1 ; RV64I-NEXT: lui a0, 5 @@ -947,7 +947,7 @@ ; RV32D-NEXT: vsetvli zero, zero, e32, m8, ta, mu ; RV32D-NEXT: vsrl.vi v8, v16, 23 ; RV32D-NEXT: vsetvli zero, zero, e16, m4, ta, mu -; RV32D-NEXT: vnsrl.wx v16, v8, zero +; RV32D-NEXT: vncvt.x.x.w v16, v8 ; RV32D-NEXT: li a0, 127 ; RV32D-NEXT: vsub.vx v8, v16, a0 ; RV32D-NEXT: li a0, 16 @@ -965,7 +965,7 @@ ; RV64D-NEXT: vsetvli zero, zero, e32, m8, ta, mu ; RV64D-NEXT: vsrl.vi v8, v16, 23 ; RV64D-NEXT: vsetvli zero, zero, e16, m4, ta, mu -; RV64D-NEXT: vnsrl.wx v16, v8, zero +; RV64D-NEXT: vncvt.x.x.w v16, v8 ; RV64D-NEXT: li a0, 127 ; RV64D-NEXT: vsub.vx v8, v16, a0 ; RV64D-NEXT: li a0, 16 @@ -982,7 +982,7 @@ ; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; RV32-NEXT: vsub.vx v16, v8, a0 -; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vnot.v v8, v8 ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: vsrl.vi v16, v8, 1 
 ; RV32-NEXT: lui a0, 5
@@ -1010,7 +1010,7 @@
 ; RV64-NEXT: li a0, 1
 ; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, mu
 ; RV64-NEXT: vsub.vx v16, v8, a0
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vand.vv v8, v8, v16
 ; RV64-NEXT: vsrl.vi v16, v8, 1
 ; RV64-NEXT: lui a0, 5
@@ -1043,7 +1043,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 349525
@@ -1072,7 +1072,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 349525
@@ -1106,7 +1106,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; RV32D-NEXT: vsrl.vx v9, v10, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v9, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v9
 ; RV32D-NEXT: li a0, 1023
 ; RV32D-NEXT: vsub.vx v9, v9, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -1126,7 +1126,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; RV64D-NEXT: vsrl.vx v8, v9, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 1023
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: li a0, 32
@@ -1143,7 +1143,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e32, m1, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 349525
@@ -1172,7 +1172,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e32, m1, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 349525
@@ -1206,7 +1206,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; RV32D-NEXT: vsrl.vx v10, v10, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v10, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v10
 ; RV32D-NEXT: li a0, 1023
 ; RV32D-NEXT: vsub.vx v9, v9, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -1226,7 +1226,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; RV64D-NEXT: vsrl.vx v8, v10, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v10, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v10, v8
 ; RV64D-NEXT: li a0, 1023
 ; RV64D-NEXT: vsub.vx v8, v10, a0
 ; RV64D-NEXT: li a0, 32
@@ -1243,7 +1243,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e32, m2, ta, mu
 ; RV32I-NEXT: vsub.vx v10, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v10
 ; RV32I-NEXT: vsrl.vi v10, v8, 1
 ; RV32I-NEXT: lui a0, 349525
@@ -1272,7 +1272,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e32, m2, ta, mu
 ; RV64I-NEXT: vsub.vx v10, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v10
 ; RV64I-NEXT: vsrl.vi v10, v8, 1
 ; RV64I-NEXT: lui a0, 349525
@@ -1306,7 +1306,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; RV32D-NEXT: vsrl.vx v12, v12, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v10, v12, zero
+; RV32D-NEXT: vncvt.x.x.w v10, v12
 ; RV32D-NEXT: li a0, 1023
 ; RV32D-NEXT: vsub.vx v10, v10, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -1326,7 +1326,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; RV64D-NEXT: vsrl.vx v8, v12, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v12, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v12, v8
 ; RV64D-NEXT: li a0, 1023
 ; RV64D-NEXT: vsub.vx v8, v12, a0
 ; RV64D-NEXT: li a0, 32
@@ -1343,7 +1343,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e32, m4, ta, mu
 ; RV32I-NEXT: vsub.vx v12, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v12
 ; RV32I-NEXT: vsrl.vi v12, v8, 1
 ; RV32I-NEXT: lui a0, 349525
@@ -1372,7 +1372,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e32, m4, ta, mu
 ; RV64I-NEXT: vsub.vx v12, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v12
 ; RV64I-NEXT: vsrl.vi v12, v8, 1
 ; RV64I-NEXT: lui a0, 349525
@@ -1406,7 +1406,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32D-NEXT: vsrl.vx v16, v16, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT: vnsrl.wx v12, v16, zero
+; RV32D-NEXT: vncvt.x.x.w v12, v16
 ; RV32D-NEXT: li a0, 1023
 ; RV32D-NEXT: vsub.vx v12, v12, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -1426,7 +1426,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV64D-NEXT: vsrl.vx v8, v16, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT: vnsrl.wx v16, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v16, v8
 ; RV64D-NEXT: li a0, 1023
 ; RV64D-NEXT: vsub.vx v8, v16, a0
 ; RV64D-NEXT: li a0, 32
@@ -1443,7 +1443,7 @@
 ; RV32-NEXT: li a0, 1
 ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu
 ; RV32-NEXT: vsub.vx v16, v8, a0
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: vand.vv v8, v8, v16
 ; RV32-NEXT: vsrl.vi v16, v8, 1
 ; RV32-NEXT: lui a0, 349525
@@ -1472,7 +1472,7 @@
 ; RV64-NEXT: li a0, 1
 ; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu
 ; RV64-NEXT: vsub.vx v16, v8, a0
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vand.vv v8, v8, v16
 ; RV64-NEXT: vsrl.vi v16, v8, 1
 ; RV64-NEXT: lui a0, 349525
@@ -1524,7 +1524,7 @@
 ; RV32-NEXT: li a0, 1
 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu
 ; RV32-NEXT: vsub.vx v9, v8, a0
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vand.vv v8, v8, v9
@@ -1555,7 +1555,7 @@
 ; RV64-NEXT: li a0, 1
 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT: vsub.vx v9, v8, a0
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vand.vv v8, v8, v9
 ; RV64-NEXT: lui a0, %hi(.LCPI18_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI18_0)(a0)
@@ -1608,7 +1608,7 @@
 ; RV32-NEXT: li a0, 1
 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu
 ; RV32-NEXT: vsub.vx v10, v8, a0
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vand.vv v8, v8, v10
@@ -1639,7 +1639,7 @@
 ; RV64-NEXT: li a0, 1
 ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
 ; RV64-NEXT: vsub.vx v10, v8, a0
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vand.vv v8, v8, v10
 ; RV64-NEXT: lui a0, %hi(.LCPI19_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI19_0)(a0)
@@ -1692,7 +1692,7 @@
 ; RV32-NEXT: li a0, 1
 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu
 ; RV32-NEXT: vsub.vx v12, v8, a0
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vand.vv v8, v8, v12
@@ -1723,7 +1723,7 @@
 ; RV64-NEXT: li a0, 1
 ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
 ; RV64-NEXT: vsub.vx v12, v8, a0
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vand.vv v8, v8, v12
 ; RV64-NEXT: lui a0, %hi(.LCPI20_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI20_0)(a0)
@@ -1776,7 +1776,7 @@
 ; RV32-NEXT: li a0, 1
 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsub.vx v16, v8, a0
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v24, (a0), zero
 ; RV32-NEXT: vand.vv v8, v8, v16
@@ -1807,7 +1807,7 @@
 ; RV64-NEXT: li a0, 1
 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
 ; RV64-NEXT: vsub.vx v16, v8, a0
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vand.vv v8, v8, v16
 ; RV64-NEXT: lui a0, %hi(.LCPI21_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI21_0)(a0)
@@ -1842,7 +1842,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
@@ -1863,7 +1863,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
@@ -1889,9 +1889,9 @@
 ; RV32D-NEXT: vfcvt.f.xu.v v8, v9
 ; RV32D-NEXT: vsrl.vi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -1906,9 +1906,9 @@
 ; RV64D-NEXT: vfcvt.f.xu.v v8, v9
 ; RV64D-NEXT: vsrl.vi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -1922,7 +1922,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
@@ -1943,7 +1943,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
@@ -1969,9 +1969,9 @@
 ; RV32D-NEXT: vfcvt.f.xu.v v8, v9
 ; RV32D-NEXT: vsrl.vi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -1986,9 +1986,9 @@
 ; RV64D-NEXT: vfcvt.f.xu.v v8, v9
 ; RV64D-NEXT: vsrl.vi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -2002,7 +2002,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
@@ -2023,7 +2023,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
@@ -2049,9 +2049,9 @@
 ; RV32D-NEXT: vfcvt.f.xu.v v8, v10
 ; RV32D-NEXT: vsrl.vi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v10, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v10, v8
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v10, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v10
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -2066,9 +2066,9 @@
 ; RV64D-NEXT: vfcvt.f.xu.v v8, v10
 ; RV64D-NEXT: vsrl.vi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v10, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v10, v8
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v10, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v10
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -2082,7 +2082,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e8, m1, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
@@ -2103,7 +2103,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e8, m1, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
@@ -2129,9 +2129,9 @@
 ; RV32D-NEXT: vfcvt.f.xu.v v8, v12
 ; RV32D-NEXT: vsrl.vi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v12, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v12, v8
 ; RV32D-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v12, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v12
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -2146,9 +2146,9 @@
 ; RV64D-NEXT: vfcvt.f.xu.v v8, v12
 ; RV64D-NEXT: vsrl.vi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v12, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v12, v8
 ; RV64D-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v12, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v12
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -2162,7 +2162,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e8, m2, ta, mu
 ; RV32I-NEXT: vsub.vx v10, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v10
 ; RV32I-NEXT: vsrl.vi v10, v8, 1
 ; RV32I-NEXT: li a0, 85
@@ -2183,7 +2183,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e8, m2, ta, mu
 ; RV64I-NEXT: vsub.vx v10, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v10
 ; RV64I-NEXT: vsrl.vi v10, v8, 1
 ; RV64I-NEXT: li a0, 85
@@ -2209,9 +2209,9 @@
 ; RV32D-NEXT: vfcvt.f.xu.v v8, v16
 ; RV32D-NEXT: vsrl.vi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT: vnsrl.wx v16, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v16, v8
 ; RV32D-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v16, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v16
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -2226,9 +2226,9 @@
 ; RV64D-NEXT: vfcvt.f.xu.v v8, v16
 ; RV64D-NEXT: vsrl.vi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT: vnsrl.wx v16, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v16, v8
 ; RV64D-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v16, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v16
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -2242,7 +2242,7 @@
 ; CHECK-NEXT: li a0, 1
 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT: vsub.vx v12, v8, a0
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: vand.vv v8, v8, v12
 ; CHECK-NEXT: vsrl.vi v12, v8, 1
 ; CHECK-NEXT: li a0, 85
@@ -2267,7 +2267,7 @@
 ; CHECK-NEXT: li a0, 1
 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT: vsub.vx v16, v8, a0
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: vand.vv v8, v8, v16
 ; CHECK-NEXT: vsrl.vi v16, v8, 1
 ; CHECK-NEXT: li a0, 85
@@ -2292,7 +2292,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 5
@@ -2320,7 +2320,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 5
@@ -2352,7 +2352,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; RV32D-NEXT: vsrl.vi v8, v9, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -2366,7 +2366,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64D-NEXT: vsrl.vi v8, v9, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -2380,7 +2380,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 5
@@ -2408,7 +2408,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 5
@@ -2440,7 +2440,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; RV32D-NEXT: vsrl.vi v8, v9, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -2454,7 +2454,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; RV64D-NEXT: vsrl.vi v8, v9, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -2468,7 +2468,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e16, m1, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 5
@@ -2496,7 +2496,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e16, m1, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 5
@@ -2528,7 +2528,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; RV32D-NEXT: vsrl.vi v8, v10, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v10, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v10, v8
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v10, a0
 ; RV32D-NEXT: ret
@@ -2542,7 +2542,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; RV64D-NEXT: vsrl.vi v8, v10, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v10, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v10, v8
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v10, a0
 ; RV64D-NEXT: ret
@@ -2556,7 +2556,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e16, m2, ta, mu
 ; RV32I-NEXT: vsub.vx v10, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v10
 ; RV32I-NEXT: vsrl.vi v10, v8, 1
 ; RV32I-NEXT: lui a0, 5
@@ -2584,7 +2584,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e16, m2, ta, mu
 ; RV64I-NEXT: vsub.vx v10, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v10
 ; RV64I-NEXT: vsrl.vi v10, v8, 1
 ; RV64I-NEXT: lui a0, 5
@@ -2616,7 +2616,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; RV32D-NEXT: vsrl.vi v8, v12, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v12, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v12, v8
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v12, a0
 ; RV32D-NEXT: ret
@@ -2630,7 +2630,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; RV64D-NEXT: vsrl.vi v8, v12, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v12, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v12, v8
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v12, a0
 ; RV64D-NEXT: ret
@@ -2644,7 +2644,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e16, m4, ta, mu
 ; RV32I-NEXT: vsub.vx v12, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v12
 ; RV32I-NEXT: vsrl.vi v12, v8, 1
 ; RV32I-NEXT: lui a0, 5
@@ -2672,7 +2672,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e16, m4, ta, mu
 ; RV64I-NEXT: vsub.vx v12, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v12
 ; RV64I-NEXT: vsrl.vi v12, v8, 1
 ; RV64I-NEXT: lui a0, 5
@@ -2704,7 +2704,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e32, m8, ta, mu
 ; RV32D-NEXT: vsrl.vi v8, v16, 23
 ; RV32D-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT: vnsrl.wx v16, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v16, v8
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v16, a0
 ; RV32D-NEXT: ret
@@ -2718,7 +2718,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e32, m8, ta, mu
 ; RV64D-NEXT: vsrl.vi v8, v16, 23
 ; RV64D-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT: vnsrl.wx v16, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v16, v8
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v16, a0
 ; RV64D-NEXT: ret
@@ -2732,7 +2732,7 @@
 ; RV32-NEXT: li a0, 1
 ; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, mu
 ; RV32-NEXT: vsub.vx v16, v8, a0
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: vand.vv v8, v8, v16
 ; RV32-NEXT: vsrl.vi v16, v8, 1
 ; RV32-NEXT: lui a0, 5
@@ -2760,7 +2760,7 @@
 ; RV64-NEXT: li a0, 1
 ; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, mu
 ; RV64-NEXT: vsub.vx v16, v8, a0
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vand.vv v8, v8, v16
 ; RV64-NEXT: vsrl.vi v16, v8, 1
 ; RV64-NEXT: lui a0, 5
@@ -2792,7 +2792,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 349525
@@ -2821,7 +2821,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 349525
@@ -2855,7 +2855,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; RV32D-NEXT: vsrl.vx v8, v9, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 1023
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -2870,7 +2870,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; RV64D-NEXT: vsrl.vx v8, v9, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 1023
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -2884,7 +2884,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e32, m1, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 349525
@@ -2913,7 +2913,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e32, m1, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 349525
@@ -2947,7 +2947,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; RV32D-NEXT: vsrl.vx v8, v10, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v10, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v10, v8
 ; RV32D-NEXT: li a0, 1023
 ; RV32D-NEXT: vsub.vx v8, v10, a0
 ; RV32D-NEXT: ret
@@ -2962,7 +2962,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; RV64D-NEXT: vsrl.vx v8, v10, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v10, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v10, v8
 ; RV64D-NEXT: li a0, 1023
 ; RV64D-NEXT: vsub.vx v8, v10, a0
 ; RV64D-NEXT: ret
@@ -2976,7 +2976,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e32, m2, ta, mu
 ; RV32I-NEXT: vsub.vx v10, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v10
 ; RV32I-NEXT: vsrl.vi v10, v8, 1
 ; RV32I-NEXT: lui a0, 349525
@@ -3005,7 +3005,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e32, m2, ta, mu
 ; RV64I-NEXT: vsub.vx v10, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v10
 ; RV64I-NEXT: vsrl.vi v10, v8, 1
 ; RV64I-NEXT: lui a0, 349525
@@ -3039,7 +3039,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; RV32D-NEXT: vsrl.vx v8, v12, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v12, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v12, v8
 ; RV32D-NEXT: li a0, 1023
 ; RV32D-NEXT: vsub.vx v8, v12, a0
 ; RV32D-NEXT: ret
@@ -3054,7 +3054,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; RV64D-NEXT: vsrl.vx v8, v12, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v12, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v12, v8
 ; RV64D-NEXT: li a0, 1023
 ; RV64D-NEXT: vsub.vx v8, v12, a0
 ; RV64D-NEXT: ret
@@ -3068,7 +3068,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e32, m4, ta, mu
 ; RV32I-NEXT: vsub.vx v12, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v12
 ; RV32I-NEXT: vsrl.vi v12, v8, 1
 ; RV32I-NEXT: lui a0, 349525
@@ -3097,7 +3097,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e32, m4, ta, mu
 ; RV64I-NEXT: vsub.vx v12, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v12
 ; RV64I-NEXT: vsrl.vi v12, v8, 1
 ; RV64I-NEXT: lui a0, 349525
@@ -3131,7 +3131,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32D-NEXT: vsrl.vx v8, v16, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT: vnsrl.wx v16, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v16, v8
 ; RV32D-NEXT: li a0, 1023
 ; RV32D-NEXT: vsub.vx v8, v16, a0
 ; RV32D-NEXT: ret
@@ -3146,7 +3146,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV64D-NEXT: vsrl.vx v8, v16, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT: vnsrl.wx v16, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v16, v8
 ; RV64D-NEXT: li a0, 1023
 ; RV64D-NEXT: vsub.vx v8, v16, a0
 ; RV64D-NEXT: ret
@@ -3160,7 +3160,7 @@
 ; RV32-NEXT: li a0, 1
 ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu
 ; RV32-NEXT: vsub.vx v16, v8, a0
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: vand.vv v8, v8, v16
 ; RV32-NEXT: vsrl.vi v16, v8, 1
 ; RV32-NEXT: lui a0, 349525
@@ -3189,7 +3189,7 @@
 ; RV64-NEXT: li a0, 1
 ; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu
 ; RV64-NEXT: vsub.vx v16, v8, a0
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vand.vv v8, v8, v16
 ; RV64-NEXT: vsrl.vi v16, v8, 1
 ; RV64-NEXT: lui a0, 349525
@@ -3240,7 +3240,7 @@
 ; RV32-NEXT: li a0, 1
 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu
 ; RV32-NEXT: vsub.vx v9, v8, a0
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vand.vv v8, v8, v9
@@ -3271,7 +3271,7 @@
 ; RV64-NEXT: li a0, 1
 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT: vsub.vx v9, v8, a0
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vand.vv v8, v8, v9
 ; RV64-NEXT: lui a0, %hi(.LCPI40_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI40_0)(a0)
@@ -3323,7 +3323,7 @@
 ; RV32-NEXT: li a0, 1
 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu
 ; RV32-NEXT: vsub.vx v10, v8, a0
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vand.vv v8, v8, v10
@@ -3354,7 +3354,7 @@
 ; RV64-NEXT: li a0, 1
 ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
 ; RV64-NEXT: vsub.vx v10, v8, a0
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vand.vv v8, v8, v10
 ; RV64-NEXT: lui a0, %hi(.LCPI41_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI41_0)(a0)
@@ -3406,7 +3406,7 @@
 ; RV32-NEXT: li a0, 1
 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu
 ; RV32-NEXT: vsub.vx v12, v8, a0
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vand.vv v8, v8, v12
@@ -3437,7 +3437,7 @@
 ; RV64-NEXT: li a0, 1
 ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
 ; RV64-NEXT: vsub.vx v12, v8, a0
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vand.vv v8, v8, v12
 ; RV64-NEXT: lui a0, %hi(.LCPI42_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI42_0)(a0)
@@ -3489,7 +3489,7 @@
 ; RV32-NEXT: li a0, 1
 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsub.vx v16, v8, a0
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v24, (a0), zero
 ; RV32-NEXT: vand.vv v8, v8, v16
@@ -3520,7 +3520,7 @@
 ; RV64-NEXT: li a0, 1
 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
 ; RV64-NEXT: vsub.vx v16, v8, a0
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vand.vv v8, v8, v16
 ; RV64-NEXT: lui a0, %hi(.LCPI43_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI43_0)(a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
--- a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
@@ -410,7 +410,7 @@
 ; CHECK-LABEL: truncstore_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vse8.v v8, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 1 x i16> %x to <vscale x 1 x i8>
@@ -474,7 +474,7 @@
 ; CHECK-LABEL: truncstore_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vse8.v v8, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 2 x i16> %x to <vscale x 2 x i8>
@@ -538,7 +538,7 @@
 ; CHECK-LABEL: truncstore_nxv4i16_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vse8.v v8, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 4 x i16> %x to <vscale x 4 x i8>
@@ -598,7 +598,7 @@
 ; CHECK-LABEL: truncstore_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v10, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v10, v8
 ; CHECK-NEXT: vs1r.v v10, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 8 x i16> %x to <vscale x 8 x i8>
@@ -658,7 +658,7 @@
 ; CHECK-LABEL: truncstore_nxv16i16_nxv16i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v12, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v12, v8
 ; CHECK-NEXT: vs2r.v v12, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 16 x i16> %x to <vscale x 16 x i8>
@@ -694,7 +694,7 @@
 ; CHECK-LABEL: truncstore_nxv32i16_nxv32i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
-; CHECK-NEXT: vnsrl.wx v16, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v16, v8
 ; CHECK-NEXT: vs4r.v v16, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 32 x i16> %x to <vscale x 32 x i8>
@@ -706,9 +706,9 @@
 ; CHECK-LABEL: truncstore_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vse8.v v8, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 1 x i32> %x to <vscale x 1 x i8>
@@ -720,7 +720,7 @@
 ; CHECK-LABEL: truncstore_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vse16.v v8, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 1 x i32> %x to <vscale x 1 x i16>
@@ -758,9 +758,9 @@
 ; CHECK-LABEL: truncstore_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vse8.v v8, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 2 x i32> %x to <vscale x 2 x i8>
@@ -772,7 +772,7 @@
 ; CHECK-LABEL: truncstore_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vse16.v v8, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 2 x i32> %x to <vscale x 2 x i16>
@@ -808,9 +808,9 @@
 ; CHECK-LABEL: truncstore_nxv4i32_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v10, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v10, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v10, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v10
 ; CHECK-NEXT: vse8.v v8, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 4 x i32> %x to <vscale x 4 x i8>
@@ -822,7 +822,7 @@
 ; CHECK-LABEL: truncstore_nxv4i32_nxv4i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v10, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v10, v8
 ; CHECK-NEXT: vs1r.v v10, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 4 x i32> %x to <vscale x 4 x i16>
@@ -858,9 +858,9 @@
 ; CHECK-LABEL: truncstore_nxv8i32_nxv8i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v12, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v12, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v12, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v12
 ; CHECK-NEXT: vs1r.v v8, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 8 x i32> %x to <vscale x 8 x i8>
@@ -872,7 +872,7 @@
 ; CHECK-LABEL: truncstore_nxv8i32_nxv8i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v12, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v12, v8
 ; CHECK-NEXT: vs2r.v v12, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 8 x i32> %x to <vscale x 8 x i16>
@@ -908,9 +908,9 @@
 ; CHECK-LABEL: truncstore_nxv16i32_nxv16i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
-; CHECK-NEXT: vnsrl.wx v16, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v16, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v16, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v16
 ; CHECK-NEXT: vs2r.v v8, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 16 x i32> %x to <vscale x 16 x i8>
@@ -922,7 +922,7 @@
 ; CHECK-LABEL: truncstore_nxv16i32_nxv16i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
-; CHECK-NEXT: vnsrl.wx v16, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v16, v8
 ; CHECK-NEXT: vs4r.v v16, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 16 x i32> %x to <vscale x 16 x i16>
@@ -934,11 +934,11 @@
 ; CHECK-LABEL: truncstore_nxv1i64_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vse8.v v8, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i8>
@@ -950,9 +950,9 @@
 ; CHECK-LABEL: truncstore_nxv1i64_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vse16.v v8, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i16>
@@ -964,7 +964,7 @@
 ; CHECK-LABEL: truncstore_nxv1i64_nxv1i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vse32.v v8, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
@@ -976,11 +976,11 @@
 ; CHECK-LABEL: truncstore_nxv2i64_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v10, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v10, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v10, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v10
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vse8.v v8, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i8>
@@ -992,9 +992,9 @@
 ; CHECK-LABEL: truncstore_nxv2i64_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v10, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v10, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v10, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v10
 ; CHECK-NEXT: vse16.v v8, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i16>
@@ -1006,7 +1006,7 @@
 ; CHECK-LABEL: truncstore_nxv2i64_nxv2i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v10, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v10, v8
 ; CHECK-NEXT: vs1r.v v10, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
@@ -1018,11 +1018,11 @@
 ; CHECK-LABEL: truncstore_nxv4i64_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v12, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v12, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v12, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v12
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vse8.v v8, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i8>
@@ -1034,9 +1034,9 @@
 ; CHECK-LABEL: truncstore_nxv4i64_nxv4i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v12, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v12, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v12, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v12
 ; CHECK-NEXT: vs1r.v v8, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i16>
@@ -1048,7 +1048,7 @@
 ; CHECK-LABEL: truncstore_nxv4i64_nxv4i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v12, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v12, v8
 ; CHECK-NEXT: vs2r.v v12, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
@@ -1060,11 +1060,11 @@
 ; CHECK-LABEL: truncstore_nxv8i64_nxv8i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT: vnsrl.wx v16, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v16, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v16, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v16
 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v10, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v10, v8
 ; CHECK-NEXT: vs1r.v v10, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i8>
@@ -1076,9 +1076,9 @@
 ; CHECK-LABEL: truncstore_nxv8i64_nxv8i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT: vnsrl.wx v16, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v16, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v16, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v16
 ; CHECK-NEXT: vs2r.v v8, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i16>
@@ -1090,7 +1090,7 @@
 ; CHECK-LABEL: truncstore_nxv8i64_nxv8i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT: vnsrl.wx v16, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v16, v8
 ; CHECK-NEXT: vs4r.v v16, (a0)
 ; CHECK-NEXT: ret
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll
@@ -17,7 +17,7 @@
 ; CHECK-NEXT: flh ft1, %lo(.LCPI0_1)(a0)
 ; CHECK-NEXT: vfadd.vf v10, v9, ft0
 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
 ; CHECK-NEXT: vmflt.vf v0, v10, ft1
 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -40,7 +40,7 @@
 ; CHECK-NEXT: flh ft1, %lo(.LCPI1_1)(a0)
 ; CHECK-NEXT: vfadd.vf v10, v9, ft0
 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
 ; CHECK-NEXT: vmflt.vf v0, v10, ft1
 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -63,7 +63,7 @@
 ; CHECK-NEXT: flh ft1, %lo(.LCPI2_1)(a0)
 ; CHECK-NEXT: vfadd.vf v10, v9, ft0
 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
 ; CHECK-NEXT: vmflt.vf v0, v10, ft1
 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -86,7 +86,7 @@
 ; CHECK-NEXT: flh ft1, %lo(.LCPI3_1)(a0)
 ; CHECK-NEXT: vfadd.vf v12, v10, ft0
 ; CHECK-NEXT: vmerge.vvm v10, v10, v12, v0
-; CHECK-NEXT: vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT: vfabs.v v12, v8
 ; CHECK-NEXT: vmflt.vf v0, v12, ft1
 ; CHECK-NEXT: vfsgnj.vv v10, v10, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
@@ -109,7 +109,7 @@
 ; CHECK-NEXT: flh ft1, %lo(.LCPI4_1)(a0)
 ; CHECK-NEXT: vfadd.vf v16, v12, ft0
 ; CHECK-NEXT: vmerge.vvm v12, v12, v16, v0
-; CHECK-NEXT: vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT: vfabs.v v16, v8
 ; CHECK-NEXT: vmflt.vf v0, v16, ft1
 ; CHECK-NEXT: vfsgnj.vv v12, v12, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0
@@ -132,7 +132,7 @@
 ; CHECK-NEXT: flh ft1, %lo(.LCPI5_1)(a0)
 ; CHECK-NEXT: vfadd.vf v24, v16, ft0
 ; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0
-; CHECK-NEXT: vfsgnjx.vv v24, v8, v8
+; CHECK-NEXT: vfabs.v v24, v8
 ; CHECK-NEXT: vmflt.vf v0, v24, ft1
 ; CHECK-NEXT: vfsgnj.vv v16, v16, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0
@@ -155,7 +155,7 @@
 ; CHECK-NEXT: flw ft1, %lo(.LCPI6_1)(a0)
 ; CHECK-NEXT: vfadd.vf v10, v9, ft0
 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
 ; CHECK-NEXT: vmflt.vf v0, v10, ft1
 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -178,7 +178,7 @@
 ; CHECK-NEXT: flw ft1, %lo(.LCPI7_1)(a0)
 ; CHECK-NEXT: vfadd.vf v10, v9, ft0
 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
 ; CHECK-NEXT: vmflt.vf v0, v10, ft1
 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -201,7 +201,7 @@
 ; CHECK-NEXT: flw ft1, %lo(.LCPI8_1)(a0)
 ; CHECK-NEXT: vfadd.vf v12, v10, ft0
 ; CHECK-NEXT: vmerge.vvm v10, v10, v12, v0
-; CHECK-NEXT: vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT: vfabs.v v12, v8
 ; CHECK-NEXT: vmflt.vf v0, v12, ft1
 ; CHECK-NEXT: vfsgnj.vv v10, v10, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
@@ -224,7 +224,7 @@
 ; CHECK-NEXT: flw ft1, %lo(.LCPI9_1)(a0)
 ; CHECK-NEXT: vfadd.vf v16, v12, ft0
 ; CHECK-NEXT: vmerge.vvm v12, v12, v16, v0
-; CHECK-NEXT: vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT: vfabs.v v16, v8
 ; CHECK-NEXT: vmflt.vf v0, v16, ft1
 ; CHECK-NEXT: vfsgnj.vv v12, v12, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0
@@ -247,7 +247,7 @@
 ; CHECK-NEXT: flw ft1, %lo(.LCPI10_1)(a0)
 ; CHECK-NEXT: vfadd.vf v24, v16, ft0
 ; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0
-; CHECK-NEXT: vfsgnjx.vv v24, v8, v8
+; CHECK-NEXT: vfabs.v v24, v8
 ; CHECK-NEXT: vmflt.vf v0, v24, ft1
 ; CHECK-NEXT: vfsgnj.vv v16, v16, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0
@@ -270,7 +270,7 @@
 ; CHECK-NEXT: fld ft1, %lo(.LCPI11_1)(a0)
 ; CHECK-NEXT: vfadd.vf v10, v9, ft0
 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
 ; CHECK-NEXT: vmflt.vf v0, v10, ft1
 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -293,7 +293,7 @@
 ; CHECK-NEXT: fld ft1, %lo(.LCPI12_1)(a0)
 ; CHECK-NEXT: vfadd.vf v12, v10, ft0
 ; CHECK-NEXT: vmerge.vvm v10, v10, v12, v0
-; CHECK-NEXT: vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT: vfabs.v v12, v8
 ; CHECK-NEXT: vmflt.vf v0, v12, ft1
 ; CHECK-NEXT: vfsgnj.vv v10, v10, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
@@ -316,7 +316,7 @@
 ; CHECK-NEXT: fld ft1, %lo(.LCPI13_1)(a0)
 ; CHECK-NEXT: vfadd.vf v16, v12, ft0
 ; CHECK-NEXT: vmerge.vvm v12, v12, v16, v0
-; CHECK-NEXT: vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT: vfabs.v v16, v8
 ; CHECK-NEXT: vmflt.vf v0, v16, ft1
 ; CHECK-NEXT: vfsgnj.vv v12, v12, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0
@@ -339,7 +339,7 @@
 ; CHECK-NEXT: fld ft1, %lo(.LCPI14_1)(a0)
 ; CHECK-NEXT: vfadd.vf v24, v16, ft0
 ; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0
-; CHECK-NEXT: vfsgnjx.vv v24, v8, v8
+; CHECK-NEXT: vfabs.v v24, v8
 ; CHECK-NEXT: vmflt.vf v0, v24, ft1
 ; CHECK-NEXT: vfsgnj.vv v16, v16, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll
@@ -17,7 +17,7 @@
 ; CHECK-NEXT: flh ft1, %lo(.LCPI0_1)(a0)
 ; CHECK-NEXT: vfsub.vf v10, v9, ft0
 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
 ; CHECK-NEXT: vmflt.vf v0, v10, ft1
 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -40,7 +40,7 @@
 ; CHECK-NEXT: flh ft1, %lo(.LCPI1_1)(a0)
 ; CHECK-NEXT: vfsub.vf v10, v9, ft0
 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
 ; CHECK-NEXT: vmflt.vf v0, v10, ft1
 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -63,7 +63,7 @@
 ; CHECK-NEXT: flh ft1, %lo(.LCPI2_1)(a0)
 ; CHECK-NEXT: vfsub.vf v10, v9, ft0
 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
 ; CHECK-NEXT: vmflt.vf v0, v10, ft1
 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -86,7 +86,7 @@
 ; CHECK-NEXT: flh ft1, %lo(.LCPI3_1)(a0)
 ; CHECK-NEXT: vfsub.vf v12, v10, ft0
 ; CHECK-NEXT: vmerge.vvm v10, v10, v12, v0
-; CHECK-NEXT: vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT: vfabs.v v12, v8
 ; CHECK-NEXT: vmflt.vf v0, v12, ft1
 ; CHECK-NEXT: vfsgnj.vv v10, v10, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
@@ -109,7 +109,7 @@
 ; CHECK-NEXT: flh ft1, %lo(.LCPI4_1)(a0)
 ; CHECK-NEXT: vfsub.vf v16, v12, ft0
 ; CHECK-NEXT: vmerge.vvm v12, v12, v16, v0
-; CHECK-NEXT: vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT: vfabs.v v16, v8
 ; CHECK-NEXT: vmflt.vf v0, v16, ft1
 ; CHECK-NEXT: vfsgnj.vv v12, v12, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0
@@ -132,7 +132,7 @@
 ; CHECK-NEXT: flh ft1, %lo(.LCPI5_1)(a0)
 ; CHECK-NEXT: vfsub.vf v24, v16, ft0
 ; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0
-; CHECK-NEXT: vfsgnjx.vv v24, v8, v8
+; CHECK-NEXT: vfabs.v v24, v8
 ; CHECK-NEXT: vmflt.vf v0, v24, ft1
 ; CHECK-NEXT: vfsgnj.vv v16, v16, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0
@@ -155,7 +155,7 @@
 ; CHECK-NEXT: flw ft1, %lo(.LCPI6_1)(a0)
 ; CHECK-NEXT: vfsub.vf v10, v9, ft0
 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
 ; CHECK-NEXT: vmflt.vf v0, v10, ft1
 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -178,7 +178,7 @@
 ; CHECK-NEXT: flw ft1, %lo(.LCPI7_1)(a0)
 ; CHECK-NEXT: vfsub.vf v10, v9, ft0
 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
 ; CHECK-NEXT: vmflt.vf v0, v10, ft1
 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -201,7 +201,7 @@
 ; CHECK-NEXT: flw ft1, %lo(.LCPI8_1)(a0)
 ; CHECK-NEXT: vfsub.vf v12, v10, ft0
 ; CHECK-NEXT: vmerge.vvm v10, v10, v12, v0
-; CHECK-NEXT: vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT: vfabs.v v12, v8
 ; CHECK-NEXT: vmflt.vf v0, v12, ft1
 ; CHECK-NEXT: vfsgnj.vv v10, v10, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
@@ -224,7 +224,7 @@
 ; CHECK-NEXT: flw ft1, %lo(.LCPI9_1)(a0)
 ; CHECK-NEXT: vfsub.vf v16, v12, ft0
 ; CHECK-NEXT: vmerge.vvm v12, v12, v16, v0
-; CHECK-NEXT: vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT: vfabs.v v16, v8
 ; CHECK-NEXT: vmflt.vf v0, v16, ft1
 ; CHECK-NEXT: vfsgnj.vv v12, v12, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0
@@ -247,7 +247,7 @@
 ; CHECK-NEXT: flw ft1, %lo(.LCPI10_1)(a0)
 ; CHECK-NEXT: vfsub.vf v24, v16, ft0
 ; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0
-; CHECK-NEXT: vfsgnjx.vv v24, v8, v8
+; CHECK-NEXT: vfabs.v v24, v8
 ; CHECK-NEXT: vmflt.vf v0, v24, ft1
 ; CHECK-NEXT: vfsgnj.vv v16, v16, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0
@@ -270,7 +270,7 @@
 ; CHECK-NEXT: fld ft1, %lo(.LCPI11_1)(a0)
 ; CHECK-NEXT: vfsub.vf v10, v9, ft0
 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
 ; CHECK-NEXT: vmflt.vf v0, v10, ft1
 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -293,7 +293,7 @@
 ; CHECK-NEXT: fld ft1, %lo(.LCPI12_1)(a0)
 ; CHECK-NEXT: vfsub.vf v12, v10, ft0
 ; CHECK-NEXT: vmerge.vvm v10, v10, v12, v0
-; CHECK-NEXT: vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT: vfabs.v v12, v8
 ; CHECK-NEXT: vmflt.vf v0, v12, ft1
 ; CHECK-NEXT: vfsgnj.vv v10, v10, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
@@ -316,7 +316,7 @@
 ; CHECK-NEXT: fld ft1, %lo(.LCPI13_1)(a0)
 ; CHECK-NEXT: vfsub.vf v16, v12, ft0
 ; CHECK-NEXT: vmerge.vvm v12, v12, v16, v0
-; CHECK-NEXT: vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT: vfabs.v v16, v8
 ; CHECK-NEXT: vmflt.vf v0, v16, ft1
 ; CHECK-NEXT: vfsgnj.vv v12, v12, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0
@@ -339,7 +339,7 @@
 ; CHECK-NEXT: fld ft1, %lo(.LCPI14_1)(a0)
 ; CHECK-NEXT: vfsub.vf v24, v16, ft0
 ; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0
-; CHECK-NEXT: vfsgnjx.vv v24, v8, v8
+; CHECK-NEXT: vfabs.v v24, v8
 ; CHECK-NEXT: vmflt.vf v0, v24, ft1
 ; CHECK-NEXT: vfsgnj.vv v16, v16, v8
 ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
@@ -21,7 +21,7 @@
 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v9
 ; LMULMAX2-RV32-NEXT: vsrl.vi v9, v8, 4
 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32-NEXT: vxor.vi v8, v8, -1
+; LMULMAX2-RV32-NEXT: vnot.v v8, v8
 ; LMULMAX2-RV32-NEXT: vsrl.vi v9, v8, 1
 ; LMULMAX2-RV32-NEXT: li a1, 85
 ; LMULMAX2-RV32-NEXT: vand.vx v9, v9, a1
@@ -47,7 +47,7 @@
 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v9
 ; LMULMAX2-RV64-NEXT: vsrl.vi v9, v8, 4
 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1
+; LMULMAX2-RV64-NEXT: vnot.v v8, v8
 ; LMULMAX2-RV64-NEXT: vsrl.vi v9, v8, 1
 ; LMULMAX2-RV64-NEXT: li a1, 85
 ; LMULMAX2-RV64-NEXT: vand.vx v9, v9, a1
@@ -73,7 +73,7 @@
 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9
 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 4
 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9
-; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1
+; LMULMAX1-RV32-NEXT: vnot.v v8, v8
 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1
 ; LMULMAX1-RV32-NEXT: li a1, 85
 ; LMULMAX1-RV32-NEXT: vand.vx v9, v9, a1
@@ -99,7 +99,7 @@
 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9
 ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 4
 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9
-; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT: vnot.v v8, v8
 ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 1
 ; LMULMAX1-RV64-NEXT: li a1, 85
 ; LMULMAX1-RV64-NEXT: vand.vx v9, v9, a1
@@ -125,7 +125,7 @@
 ; LMULMAX8-RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; LMULMAX8-RV32-NEXT: vnsrl.wi v10, v12, 23
 ; LMULMAX8-RV32-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; LMULMAX8-RV32-NEXT: vnsrl.wx v9, v10, zero
+; LMULMAX8-RV32-NEXT: vncvt.x.x.w v9, v10
 ; LMULMAX8-RV32-NEXT: li a1, 134
 ; LMULMAX8-RV32-NEXT: vmseq.vi v0, v8, 0
 ; LMULMAX8-RV32-NEXT: vrsub.vx v8, v9, a1
@@ -143,7 +143,7 @@
 ; LMULMAX8-RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; LMULMAX8-RV64-NEXT: vnsrl.wi v10, v12, 23
 ; LMULMAX8-RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; LMULMAX8-RV64-NEXT: vnsrl.wx v9, v10, zero
+; LMULMAX8-RV64-NEXT: vncvt.x.x.w v9, v10
 ; LMULMAX8-RV64-NEXT: li a1, 134
 ; LMULMAX8-RV64-NEXT: vmseq.vi v0, v8, 0
 ; LMULMAX8-RV64-NEXT: vrsub.vx v8, v9, a1
@@ -171,7 +171,7 @@
 ; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
 ; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 8
 ; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vxor.vi v8, v8, -1
+; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
 ; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1
 ; LMULMAX2-RV32I-NEXT: lui a1, 5
 ; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
@@ -206,7 +206,7 @@
 ; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
 ; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 8
 ; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vxor.vi v8, v8, -1
+; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
 ; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1
 ; LMULMAX2-RV64I-NEXT: lui a1, 5
 ; LMULMAX2-RV64I-NEXT: addiw a1, a1, 1365
@@ -241,7 +241,7 @@
 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9
 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 8
 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9
-; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1
+; LMULMAX1-RV32-NEXT: vnot.v v8, v8
 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1
 ; LMULMAX1-RV32-NEXT: lui a1, 5
 ; LMULMAX1-RV32-NEXT: addi a1, a1, 1365
@@ -276,7 +276,7 @@
 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9
 ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 8
 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9
-; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT: vnot.v v8, v8
 ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 1
 ; LMULMAX1-RV64-NEXT: lui a1, 5
 ; LMULMAX1-RV64-NEXT: addiw a1, a1, 1365
@@ -377,7 +377,7 @@
 ; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
 ; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 16
 ; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT: vxor.vi v8, v8, -1
+; LMULMAX2-RV32I-NEXT: vnot.v v8, v8
 ; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1
 ; LMULMAX2-RV32I-NEXT: lui a1, 349525
 ; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365
@@ -415,7 +415,7 @@
 ; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
 ; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 16
 ; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT: vxor.vi v8, v8, -1
+; LMULMAX2-RV64I-NEXT: vnot.v v8, v8
 ; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1
 ; LMULMAX2-RV64I-NEXT: lui a1, 349525
 ; LMULMAX2-RV64I-NEXT: addiw a1, a1, 1365
@@ -453,7 +453,7 @@
 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9
 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 16
 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9
-; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1
+; LMULMAX1-RV32-NEXT: vnot.v v8, v8
 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1
 ; LMULMAX1-RV32-NEXT: lui a1, 349525
 ; LMULMAX1-RV32-NEXT: addi a1, a1, 1365
@@ -491,7 +491,7 @@
 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9
 ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 16
 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9
-; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT: vnot.v v8, v8
 ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 1
 ; LMULMAX1-RV64-NEXT: lui a1, 349525
 ; LMULMAX1-RV64-NEXT: addiw a1, a1, 1365
@@ -657,7 +657,7 @@
 ; LMULMAX2-RV64-NEXT: li a1, 32
 ; LMULMAX2-RV64-NEXT: vsrl.vx v9, v8, a1
 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v9
-; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1
+; LMULMAX2-RV64-NEXT: vnot.v v8, v8
 ; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI3_0)
 ; LMULMAX2-RV64-NEXT: ld a1, %lo(.LCPI3_0)(a1)
 ; LMULMAX2-RV64-NEXT: lui a2, %hi(.LCPI3_1)
@@ -756,7 +756,7 @@
 ; LMULMAX1-RV64-NEXT: li a1, 32
 ; LMULMAX1-RV64-NEXT: vsrl.vx v9, v8, a1
 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9
-; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT: vnot.v v8, v8
 ; LMULMAX1-RV64-NEXT: lui a1, %hi(.LCPI3_0)
 ; LMULMAX1-RV64-NEXT: ld a1, %lo(.LCPI3_0)(a1)
 ; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI3_1)
@@ -855,7 +855,7 @@
 ; LMULMAX8-RV64-NEXT: li a1, 32
 ; LMULMAX8-RV64-NEXT: vsrl.vx v9, v8, a1
 ; LMULMAX8-RV64-NEXT: vor.vv v8, v8, v9
-; LMULMAX8-RV64-NEXT: vxor.vi v8, v8, -1
+; LMULMAX8-RV64-NEXT: vnot.v v8, v8
 ; LMULMAX8-RV64-NEXT: lui a1, %hi(.LCPI3_0)
 ; LMULMAX8-RV64-NEXT: ld a1, %lo(.LCPI3_0)(a1)
 ; LMULMAX8-RV64-NEXT: lui a2, %hi(.LCPI3_1)
@@ -899,7 +899,7 @@
 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10
 ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 4
 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT: vxor.vi v8, v8, -1
+; LMULMAX2-RV32-NEXT: vnot.v v8, v8
 ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1
 ; LMULMAX2-RV32-NEXT: li a1, 85
 ; LMULMAX2-RV32-NEXT: vand.vx v10, v10, a1
@@ -926,7 +926,7 @@
 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10
 ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 4
 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1
+; LMULMAX2-RV64-NEXT: vnot.v v8, v8
 ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1
 ; LMULMAX2-RV64-NEXT: li a1, 85
 ; LMULMAX2-RV64-NEXT: vand.vx v10, v10, a1
@@ -954,7 +954,7 @@
 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10
 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 4
 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1
+; LMULMAX1-RV32-NEXT: vnot.v v8, v8
 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 1
 ; LMULMAX1-RV32-NEXT: li a2, 85
 ; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a2
@@ -973,7 +973,7 @@
 ; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10
 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 4
 ; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vxor.vi v9, v9, -1
+; LMULMAX1-RV32-NEXT: vnot.v v9, v9
 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 1
 ; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a2
 ; LMULMAX1-RV32-NEXT: vsub.vv v9, v9, v10
@@ -1000,7 +1000,7 @@
 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10
 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 4
 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT: vnot.v v8, v8
 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 1
 ; LMULMAX1-RV64-NEXT: li a2, 85
 ; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a2
@@ -1019,7 +1019,7 @@
 ; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10
 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 4
 ; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vxor.vi v9, v9, -1
+; LMULMAX1-RV64-NEXT: vnot.v v9, v9
 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1
 ; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a2
 ; LMULMAX1-RV64-NEXT: vsub.vv v9, v9, v10
@@ -1045,7 +1045,7 @@
 ; LMULMAX8-RV32-NEXT: vsetvli zero, zero, e16, m4, ta, mu
 ; LMULMAX8-RV32-NEXT: vnsrl.wi v12, v16, 23
 ; LMULMAX8-RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; LMULMAX8-RV32-NEXT: vnsrl.wx v10, v12, zero
+; LMULMAX8-RV32-NEXT: vncvt.x.x.w v10, v12
 ; LMULMAX8-RV32-NEXT: li a1, 134
 ; LMULMAX8-RV32-NEXT: vmseq.vi v0, v8, 0
 ; LMULMAX8-RV32-NEXT: vrsub.vx v8, v10, a1
@@ -1064,7 +1064,7 @@
 ; LMULMAX8-RV64-NEXT: vsetvli zero, zero, e16, m4, ta, mu
 ; LMULMAX8-RV64-NEXT: vnsrl.wi v12, v16, 23
 ; LMULMAX8-RV64-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; LMULMAX8-RV64-NEXT: vnsrl.wx v10, v12, zero
+; LMULMAX8-RV64-NEXT: vncvt.x.x.w v10, v12
 ; LMULMAX8-RV64-NEXT: li a1, 134
 ; LMULMAX8-RV64-NEXT: vmseq.vi v0, v8, 0
 ; LMULMAX8-RV64-NEXT: vrsub.vx v8, v10, a1
@@ -1092,7 +1092,7 @@
 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10
 ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 8
 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT: vxor.vi v8, v8, -1
+; LMULMAX2-RV32-NEXT: vnot.v v8, v8
 ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1
 ; LMULMAX2-RV32-NEXT: lui a1, 5
 ; LMULMAX2-RV32-NEXT: addi a1, a1, 1365
@@ -1127,7 +1127,7 @@
 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10
 ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 8
 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1
+; LMULMAX2-RV64-NEXT: vnot.v v8, v8
 ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1
 ; LMULMAX2-RV64-NEXT: lui a1, 5
 ; LMULMAX2-RV64-NEXT: addiw a1, a1, 1365
@@ -1164,7 +1164,7 @@
 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10
 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 8
 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1
+; LMULMAX1-RV32-NEXT: vnot.v v8, v8
 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 1
 ; LMULMAX1-RV32-NEXT: lui a2, 5
 ; LMULMAX1-RV32-NEXT: addi a2, a2, 1365
@@ -1192,7 +1192,7 @@
 ; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10
 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 8
 ; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vxor.vi v9, v9, -1
+; LMULMAX1-RV32-NEXT: vnot.v v9, v9
 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 1
 ; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a2
 ; LMULMAX1-RV32-NEXT: vsub.vv v9, v9, v10
@@ -1223,7 +1223,7 @@
 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10
 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 8
 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT: vnot.v v8, v8
 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 1
 ; LMULMAX1-RV64-NEXT: lui a2, 5
 ; LMULMAX1-RV64-NEXT: addiw a2, a2, 1365
@@ -1251,7 +1251,7 @@
 ; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10
 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 8
 ; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vxor.vi v9, v9, -1
+; LMULMAX1-RV64-NEXT: vnot.v v9, v9
 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1
 ; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a2
 ; LMULMAX1-RV64-NEXT: vsub.vv v9, v9, v10
@@ -1318,7 +1318,7 @@
 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10
 ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 16
 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT: vxor.vi v8, v8, -1
+; LMULMAX2-RV32-NEXT: vnot.v v8, v8
 ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1
 ; LMULMAX2-RV32-NEXT: lui a1, 349525
 ; LMULMAX2-RV32-NEXT: addi a1, a1, 1365
@@ -1356,7 +1356,7 @@
 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10
 ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 16
 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1
+; LMULMAX2-RV64-NEXT: vnot.v v8, v8
 ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1
 ; LMULMAX2-RV64-NEXT: lui a1, 349525
 ; LMULMAX2-RV64-NEXT: addiw a1, a1, 1365
@@ -1396,7 +1396,7 @@
 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10
 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 16
 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1
+; LMULMAX1-RV32-NEXT: vnot.v v8, v8
 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 1
 ; LMULMAX1-RV32-NEXT: lui a2, 349525
 ; LMULMAX1-RV32-NEXT: addi a2, a2, 1365
@@ -1427,7 +1427,7 @@
 ; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10
 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 16
 ; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT: vxor.vi v9, v9, -1
+; LMULMAX1-RV32-NEXT: vnot.v v9, v9
 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 1
 ; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a2
 ; LMULMAX1-RV32-NEXT: vsub.vv v9, v9, v10
@@ -1460,7 +1460,7 @@
 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10
 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 16
 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT: vnot.v v8, v8
 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 1
 ; LMULMAX1-RV64-NEXT: lui a2, 349525
 ; LMULMAX1-RV64-NEXT: addiw a2, a2, 1365
@@ -1491,7 +1491,7 @@
 ; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10
 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 16
 ; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vxor.vi v9, v9, -1
+; LMULMAX1-RV64-NEXT: vnot.v v9, v9
 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1
 ; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a2
 ; LMULMAX1-RV64-NEXT: vsub.vv v9, v9, v10
@@ -1620,7 +1620,7 @@
 ; LMULMAX2-RV64-NEXT: li a1, 32
 ; LMULMAX2-RV64-NEXT: vsrl.vx v10, v8, a1
 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1
+; LMULMAX2-RV64-NEXT: vnot.v v8, v8
 ; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI7_0)
 ; LMULMAX2-RV64-NEXT: ld a1, %lo(.LCPI7_0)(a1)
 ; LMULMAX2-RV64-NEXT: lui a2, %hi(.LCPI7_1)
@@ -1749,7 +1749,7 @@
 ; LMULMAX1-RV64-NEXT: li a2, 32
 ; LMULMAX1-RV64-NEXT: vsrl.vx v10, v8, a2
 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT: vnot.v v8, v8
 ; LMULMAX1-RV64-NEXT: lui a3, %hi(.LCPI7_0)
 ; LMULMAX1-RV64-NEXT: ld a3, %lo(.LCPI7_0)(a3)
 ; LMULMAX1-RV64-NEXT: lui a4, %hi(.LCPI7_1)
@@ -1783,7 +1783,7 @@
 ; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10
 ; LMULMAX1-RV64-NEXT: vsrl.vx v10, v9, a2
 ; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT: vxor.vi v9, v9, -1
+; LMULMAX1-RV64-NEXT: vnot.v v9, v9
 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1
 ; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a3
 ; LMULMAX1-RV64-NEXT: vsub.vv v9, v9, v10
@@ -1874,7 +1874,7 @@
 ; LMULMAX8-RV64-NEXT: li a1, 32
 ; LMULMAX8-RV64-NEXT: vsrl.vx v10, v8, a1
 ; LMULMAX8-RV64-NEXT: vor.vv v8, v8, v10
-; LMULMAX8-RV64-NEXT: vxor.vi v8, v8, -1
+; LMULMAX8-RV64-NEXT: vnot.v v8, v8
 ; LMULMAX8-RV64-NEXT: lui a1, %hi(.LCPI7_0)
 ; LMULMAX8-RV64-NEXT: ld a1, %lo(.LCPI7_0)(a1)
 ; LMULMAX8-RV64-NEXT: lui a2, %hi(.LCPI7_1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
@@ -17,7 +17,7 @@
LMULMAX2-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX2-RV32-NEXT: li a1, 1 ; LMULMAX2-RV32-NEXT: vsub.vx v9, v8, a1 -; LMULMAX2-RV32-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV32-NEXT: vnot.v v8, v8 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV32-NEXT: li a1, 85 @@ -40,7 +40,7 @@ ; LMULMAX2-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v9, v8, a1 -; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV64-NEXT: vnot.v v8, v8 ; LMULMAX2-RV64-NEXT: vand.vv v8, v8, v9 ; LMULMAX2-RV64-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV64-NEXT: li a1, 85 @@ -63,7 +63,7 @@ ; LMULMAX1-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV32-NEXT: li a1, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v9, v8, a1 -; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV32-NEXT: vnot.v v8, v8 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV32-NEXT: li a1, 85 @@ -86,7 +86,7 @@ ; LMULMAX1-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV64-NEXT: li a1, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v9, v8, a1 -; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV64-NEXT: vnot.v v8, v8 ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v9 ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV64-NEXT: li a1, 85 @@ -115,7 +115,7 @@ ; LMULMAX8-RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; LMULMAX8-RV32-NEXT: vnsrl.wi v10, v12, 23 ; LMULMAX8-RV32-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; LMULMAX8-RV32-NEXT: vnsrl.wx v9, v10, zero +; LMULMAX8-RV32-NEXT: vncvt.x.x.w v9, v10 ; LMULMAX8-RV32-NEXT: li a1, 127 ; LMULMAX8-RV32-NEXT: vmseq.vi v0, v8, 0 ; LMULMAX8-RV32-NEXT: vsub.vx v8, v9, a1 @@ -135,7 +135,7 @@ ; LMULMAX8-RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; LMULMAX8-RV64-NEXT: vnsrl.wi v10, v12, 23 ; LMULMAX8-RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; LMULMAX8-RV64-NEXT: vnsrl.wx v9, v10, zero +; LMULMAX8-RV64-NEXT: vncvt.x.x.w v9, v10 ; LMULMAX8-RV64-NEXT: li a1, 127 ; LMULMAX8-RV64-NEXT: vmseq.vi v0, v8, 0 ; LMULMAX8-RV64-NEXT: vsub.vx v8, v9, a1 @@ -157,7 +157,7 @@ ; LMULMAX2-RV32I-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV32I-NEXT: li a1, 1 ; LMULMAX2-RV32I-NEXT: vsub.vx v9, v8, a1 -; LMULMAX2-RV32I-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV32I-NEXT: vnot.v v8, v8 ; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v9 ; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV32I-NEXT: lui a1, 5 @@ -187,7 +187,7 @@ ; LMULMAX2-RV64I-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV64I-NEXT: li a1, 1 ; LMULMAX2-RV64I-NEXT: vsub.vx v9, v8, a1 -; LMULMAX2-RV64I-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV64I-NEXT: vnot.v v8, v8 ; LMULMAX2-RV64I-NEXT: vand.vv v8, v8, v9 ; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV64I-NEXT: lui a1, 5 @@ -217,7 +217,7 @@ ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: li a1, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v9, v8, a1 -; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV32-NEXT: vnot.v v8, v8 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV32-NEXT: lui a1, 5 @@ -247,7 +247,7 @@ ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: li a1, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v9, v8, a1 -; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV64-NEXT: vnot.v v8, v8 ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v9 ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV64-NEXT: lui a1, 5 @@ -349,7 +349,7 @@ ; LMULMAX2-RV32I-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV32I-NEXT: li a1, 1 ; LMULMAX2-RV32I-NEXT: vsub.vx v9, v8, a1 -; LMULMAX2-RV32I-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV32I-NEXT: vnot.v v8, v8 ; 
LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v9 ; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV32I-NEXT: lui a1, 349525 @@ -380,7 +380,7 @@ ; LMULMAX2-RV64I-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV64I-NEXT: li a1, 1 ; LMULMAX2-RV64I-NEXT: vsub.vx v9, v8, a1 -; LMULMAX2-RV64I-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV64I-NEXT: vnot.v v8, v8 ; LMULMAX2-RV64I-NEXT: vand.vv v8, v8, v9 ; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV64I-NEXT: lui a1, 349525 @@ -411,7 +411,7 @@ ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: li a1, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v9, v8, a1 -; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV32-NEXT: vnot.v v8, v8 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV32-NEXT: lui a1, 349525 @@ -442,7 +442,7 @@ ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: li a1, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v9, v8, a1 -; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV64-NEXT: vnot.v v8, v8 ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v9 ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV64-NEXT: lui a1, 349525 @@ -596,7 +596,7 @@ ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v9, v8, a1 -; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV64-NEXT: vnot.v v8, v8 ; LMULMAX2-RV64-NEXT: vand.vv v8, v8, v9 ; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI3_0) ; LMULMAX2-RV64-NEXT: ld a1, %lo(.LCPI3_0)(a1) @@ -675,7 +675,7 @@ ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: li a1, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v9, v8, a1 -; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV64-NEXT: vnot.v v8, v8 ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v9 ; LMULMAX1-RV64-NEXT: lui a1, %hi(.LCPI3_0) ; LMULMAX1-RV64-NEXT: ld a1, %lo(.LCPI3_0)(a1) @@ -754,7 +754,7 @@ ; LMULMAX8-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX8-RV64-NEXT: li a1, 1 ; LMULMAX8-RV64-NEXT: vsub.vx v9, v8, a1 -; LMULMAX8-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX8-RV64-NEXT: vnot.v v8, v8 ; LMULMAX8-RV64-NEXT: vand.vv v8, v8, v9 ; LMULMAX8-RV64-NEXT: lui a1, %hi(.LCPI3_0) ; LMULMAX8-RV64-NEXT: ld a1, %lo(.LCPI3_0)(a1) @@ -795,7 +795,7 @@ ; LMULMAX2-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX2-RV32-NEXT: li a1, 1 ; LMULMAX2-RV32-NEXT: vsub.vx v10, v8, a1 -; LMULMAX2-RV32-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV32-NEXT: vnot.v v8, v8 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV32-NEXT: li a1, 85 @@ -819,7 +819,7 @@ ; LMULMAX2-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v10, v8, a1 -; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV64-NEXT: vnot.v v8, v8 ; LMULMAX2-RV64-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV64-NEXT: li a1, 85 @@ -844,7 +844,7 @@ ; LMULMAX1-RV32-NEXT: vle8.v v9, (a0) ; LMULMAX1-RV32-NEXT: li a2, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v10, v8, a2 -; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV32-NEXT: vnot.v v8, v8 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX1-RV32-NEXT: li a3, 85 @@ -859,7 +859,7 @@ ; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vand.vi v8, v8, 15 ; LMULMAX1-RV32-NEXT: vsub.vx v10, v9, a2 -; LMULMAX1-RV32-NEXT: vxor.vi v9, v9, -1 +; LMULMAX1-RV32-NEXT: vnot.v v9, v9 ; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v10 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 1 ; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a3 @@ -883,7 +883,7 @@ ; LMULMAX1-RV64-NEXT: vle8.v v9, (a0) ; LMULMAX1-RV64-NEXT: li a2, 1 ; 
LMULMAX1-RV64-NEXT: vsub.vx v10, v8, a2 -; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV64-NEXT: vnot.v v8, v8 ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v10 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX1-RV64-NEXT: li a3, 85 @@ -898,7 +898,7 @@ ; LMULMAX1-RV64-NEXT: vadd.vv v8, v8, v10 ; LMULMAX1-RV64-NEXT: vand.vi v8, v8, 15 ; LMULMAX1-RV64-NEXT: vsub.vx v10, v9, a2 -; LMULMAX1-RV64-NEXT: vxor.vi v9, v9, -1 +; LMULMAX1-RV64-NEXT: vnot.v v9, v9 ; LMULMAX1-RV64-NEXT: vand.vv v9, v9, v10 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1 ; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a3 @@ -927,7 +927,7 @@ ; LMULMAX8-RV32-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; LMULMAX8-RV32-NEXT: vnsrl.wi v12, v16, 23 ; LMULMAX8-RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu -; LMULMAX8-RV32-NEXT: vnsrl.wx v10, v12, zero +; LMULMAX8-RV32-NEXT: vncvt.x.x.w v10, v12 ; LMULMAX8-RV32-NEXT: li a1, 127 ; LMULMAX8-RV32-NEXT: vmseq.vi v0, v8, 0 ; LMULMAX8-RV32-NEXT: vsub.vx v8, v10, a1 @@ -948,7 +948,7 @@ ; LMULMAX8-RV64-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; LMULMAX8-RV64-NEXT: vnsrl.wi v12, v16, 23 ; LMULMAX8-RV64-NEXT: vsetvli zero, zero, e8, m2, ta, mu -; LMULMAX8-RV64-NEXT: vnsrl.wx v10, v12, zero +; LMULMAX8-RV64-NEXT: vncvt.x.x.w v10, v12 ; LMULMAX8-RV64-NEXT: li a1, 127 ; LMULMAX8-RV64-NEXT: vmseq.vi v0, v8, 0 ; LMULMAX8-RV64-NEXT: vsub.vx v8, v10, a1 @@ -970,7 +970,7 @@ ; LMULMAX2-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV32-NEXT: li a1, 1 ; LMULMAX2-RV32-NEXT: vsub.vx v10, v8, a1 -; LMULMAX2-RV32-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV32-NEXT: vnot.v v8, v8 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV32-NEXT: lui a1, 5 @@ -1000,7 +1000,7 @@ ; LMULMAX2-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v10, v8, a1 -; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV64-NEXT: vnot.v v8, v8 ; LMULMAX2-RV64-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV64-NEXT: lui a1, 5 @@ -1032,7 +1032,7 @@ ; LMULMAX1-RV32-NEXT: vle16.v v9, (a0) ; LMULMAX1-RV32-NEXT: li a2, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v10, v8, a2 -; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV32-NEXT: vnot.v v8, v8 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX1-RV32-NEXT: lui a3, 5 @@ -1054,7 +1054,7 @@ ; LMULMAX1-RV32-NEXT: vmul.vx v8, v8, a6 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX1-RV32-NEXT: vsub.vx v10, v9, a2 -; LMULMAX1-RV32-NEXT: vxor.vi v9, v9, -1 +; LMULMAX1-RV32-NEXT: vnot.v v9, v9 ; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v10 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 1 ; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a3 @@ -1080,7 +1080,7 @@ ; LMULMAX1-RV64-NEXT: vle16.v v9, (a0) ; LMULMAX1-RV64-NEXT: li a2, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v10, v8, a2 -; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV64-NEXT: vnot.v v8, v8 ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v10 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX1-RV64-NEXT: lui a3, 5 @@ -1102,7 +1102,7 @@ ; LMULMAX1-RV64-NEXT: vmul.vx v8, v8, a6 ; LMULMAX1-RV64-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX1-RV64-NEXT: vsub.vx v10, v9, a2 -; LMULMAX1-RV64-NEXT: vxor.vi v9, v9, -1 +; LMULMAX1-RV64-NEXT: vnot.v v9, v9 ; LMULMAX1-RV64-NEXT: vand.vv v9, v9, v10 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1 ; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a3 @@ -1166,7 +1166,7 @@ ; LMULMAX2-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV32-NEXT: li a1, 1 ; LMULMAX2-RV32-NEXT: vsub.vx v10, v8, a1 -; LMULMAX2-RV32-NEXT: vxor.vi v8, v8, -1 +; 
LMULMAX2-RV32-NEXT: vnot.v v8, v8 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV32-NEXT: lui a1, 349525 @@ -1197,7 +1197,7 @@ ; LMULMAX2-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v10, v8, a1 -; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV64-NEXT: vnot.v v8, v8 ; LMULMAX2-RV64-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV64-NEXT: lui a1, 349525 @@ -1230,7 +1230,7 @@ ; LMULMAX1-RV32-NEXT: vle32.v v9, (a0) ; LMULMAX1-RV32-NEXT: li a2, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v10, v8, a2 -; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV32-NEXT: vnot.v v8, v8 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX1-RV32-NEXT: lui a3, 349525 @@ -1253,7 +1253,7 @@ ; LMULMAX1-RV32-NEXT: vmul.vx v8, v8, a6 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 24 ; LMULMAX1-RV32-NEXT: vsub.vx v10, v9, a2 -; LMULMAX1-RV32-NEXT: vxor.vi v9, v9, -1 +; LMULMAX1-RV32-NEXT: vnot.v v9, v9 ; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v10 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 1 ; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a3 @@ -1279,7 +1279,7 @@ ; LMULMAX1-RV64-NEXT: vle32.v v9, (a0) ; LMULMAX1-RV64-NEXT: li a2, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v10, v8, a2 -; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV64-NEXT: vnot.v v8, v8 ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v10 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX1-RV64-NEXT: lui a3, 349525 @@ -1302,7 +1302,7 @@ ; LMULMAX1-RV64-NEXT: vmul.vx v8, v8, a6 ; LMULMAX1-RV64-NEXT: vsrl.vi v8, v8, 24 ; LMULMAX1-RV64-NEXT: vsub.vx v10, v9, a2 -; LMULMAX1-RV64-NEXT: vxor.vi v9, v9, -1 +; LMULMAX1-RV64-NEXT: vnot.v v9, v9 ; LMULMAX1-RV64-NEXT: vand.vv v9, v9, v10 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1 ; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a3 @@ -1415,7 +1415,7 @@ ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v10, v8, a1 -; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV64-NEXT: vnot.v v8, v8 ; LMULMAX2-RV64-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI7_0) ; LMULMAX2-RV64-NEXT: ld a1, %lo(.LCPI7_0)(a1) @@ -1514,7 +1514,7 @@ ; LMULMAX1-RV64-NEXT: vle64.v v9, (a0) ; LMULMAX1-RV64-NEXT: li a2, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v10, v8, a2 -; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV64-NEXT: vnot.v v8, v8 ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v10 ; LMULMAX1-RV64-NEXT: lui a3, %hi(.LCPI7_0) ; LMULMAX1-RV64-NEXT: ld a3, %lo(.LCPI7_0)(a3) @@ -1538,7 +1538,7 @@ ; LMULMAX1-RV64-NEXT: li a7, 56 ; LMULMAX1-RV64-NEXT: vsrl.vx v8, v8, a7 ; LMULMAX1-RV64-NEXT: vsub.vx v10, v9, a2 -; LMULMAX1-RV64-NEXT: vxor.vi v9, v9, -1 +; LMULMAX1-RV64-NEXT: vnot.v v9, v9 ; LMULMAX1-RV64-NEXT: vand.vv v9, v9, v10 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1 ; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a3 @@ -1609,7 +1609,7 @@ ; LMULMAX8-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX8-RV64-NEXT: li a1, 1 ; LMULMAX8-RV64-NEXT: vsub.vx v10, v8, a1 -; LMULMAX8-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX8-RV64-NEXT: vnot.v v8, v8 ; LMULMAX8-RV64-NEXT: vand.vv v8, v8, v10 ; LMULMAX8-RV64-NEXT: lui a1, %hi(.LCPI7_0) ; LMULMAX8-RV64-NEXT: ld a1, %lo(.LCPI7_0)(a1) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll @@ -573,7 +573,7 @@ ; CHECK-LABEL: 
truncstore_v2i16_v2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc <2 x i16> %x to <2 x i8> @@ -637,7 +637,7 @@ ; CHECK-LABEL: truncstore_v4i16_v4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc <4 x i16> %x to <4 x i8> @@ -723,7 +723,7 @@ ; CHECK-LABEL: truncstore_v8i16_v8i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc <8 x i16> %x to <8 x i8> @@ -847,8 +847,8 @@ ; LMULMAX1-LABEL: truncstore_v16i16_v16i8: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 8 ; LMULMAX1-NEXT: vse8.v v8, (a0) @@ -857,7 +857,7 @@ ; LMULMAX4-LABEL: truncstore_v16i16_v16i8: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v10, v8, zero +; LMULMAX4-NEXT: vncvt.x.x.w v10, v8 ; LMULMAX4-NEXT: vse8.v v10, (a0) ; LMULMAX4-NEXT: ret %y = trunc <16 x i16> %x to <16 x i8> @@ -1031,9 +1031,9 @@ ; CHECK-LABEL: truncstore_v2i32_v2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc <2 x i32> %x to <2 x i8> @@ -1045,7 +1045,7 @@ ; CHECK-LABEL: truncstore_v2i32_v2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret %y = trunc <2 x i32> %x to <2 x i16> @@ -1083,9 +1083,9 @@ ; CHECK-LABEL: truncstore_v4i32_v4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc <4 x i32> %x to <4 x i8> @@ -1097,7 +1097,7 @@ ; CHECK-LABEL: truncstore_v4i32_v4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret %y = trunc <4 x i32> %x to <4 x i16> @@ -1157,13 +1157,13 @@ ; LMULMAX1-LABEL: truncstore_v8i32_v8i8: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu 
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vse8.v v8, (a0) @@ -1172,9 +1172,9 @@ ; LMULMAX4-LABEL: truncstore_v8i32_v8i8: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 8, e16, m1, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v10, v8, zero +; LMULMAX4-NEXT: vncvt.x.x.w v10, v8 ; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v8, v10, zero +; LMULMAX4-NEXT: vncvt.x.x.w v8, v10 ; LMULMAX4-NEXT: vse8.v v8, (a0) ; LMULMAX4-NEXT: ret %y = trunc <8 x i32> %x to <8 x i8> @@ -1186,8 +1186,8 @@ ; LMULMAX1-LABEL: truncstore_v8i32_v8i16: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vse16.v v8, (a0) @@ -1196,7 +1196,7 @@ ; LMULMAX4-LABEL: truncstore_v8i32_v8i16: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 8, e16, m1, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v10, v8, zero +; LMULMAX4-NEXT: vncvt.x.x.w v10, v8 ; LMULMAX4-NEXT: vse16.v v10, (a0) ; LMULMAX4-NEXT: ret %y = trunc <8 x i32> %x to <8 x i16> @@ -1270,25 +1270,25 @@ ; LMULMAX1-LABEL: truncstore_v16i32_v16i8: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v10, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 8 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 12 ; LMULMAX1-NEXT: vse8.v v8, (a0) @@ -1297,9 +1297,9 @@ ; LMULMAX4-LABEL: truncstore_v16i32_v16i8: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v12, v8, zero +; LMULMAX4-NEXT: vncvt.x.x.w v12, v8 ; LMULMAX4-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v8, v12, zero +; LMULMAX4-NEXT: vncvt.x.x.w v8, v12 ; LMULMAX4-NEXT: vse8.v v8, (a0) ; LMULMAX4-NEXT: ret %y = trunc <16 x i32> %x to <16 x i8> @@ -1311,13 +1311,13 @@ ; LMULMAX1-LABEL: truncstore_v16i32_v16i16: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu -; 
LMULMAX1-NEXT: vnsrl.wx v9, v11, zero -; LMULMAX1-NEXT: vnsrl.wx v10, v10, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 +; LMULMAX1-NEXT: vncvt.x.x.w v10, v10 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 ; LMULMAX1-NEXT: addi a1, a0, 16 @@ -1328,7 +1328,7 @@ ; LMULMAX4-LABEL: truncstore_v16i32_v16i16: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v12, v8, zero +; LMULMAX4-NEXT: vncvt.x.x.w v12, v8 ; LMULMAX4-NEXT: vse16.v v12, (a0) ; LMULMAX4-NEXT: ret %y = trunc <16 x i32> %x to <16 x i16> @@ -1436,11 +1436,11 @@ ; CHECK-LABEL: truncstore_v2i64_v2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc <2 x i64> %x to <2 x i8> @@ -1452,9 +1452,9 @@ ; CHECK-LABEL: truncstore_v2i64_v2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret %y = trunc <2 x i64> %x to <2 x i16> @@ -1466,7 +1466,7 @@ ; CHECK-LABEL: truncstore_v2i64_v2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret %y = trunc <2 x i64> %x to <2 x i32> @@ -1478,17 +1478,17 @@ ; LMULMAX1-LABEL: truncstore_v4i64_v4i8: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vse8.v v8, (a0) @@ -1497,11 +1497,11 @@ ; LMULMAX4-LABEL: truncstore_v4i64_v4i8: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v10, v8, zero +; LMULMAX4-NEXT: vncvt.x.x.w v10, v8 ; LMULMAX4-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v8, v10, zero +; LMULMAX4-NEXT: vncvt.x.x.w v8, v10 ; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX4-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX4-NEXT: vse8.v v8, (a0) ; LMULMAX4-NEXT: ret %y = trunc <4 x i64> %x to <4 x i8> @@ -1513,13 +1513,13 @@ ; LMULMAX1-LABEL: truncstore_v4i64_v4i16: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; 
LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vse16.v v8, (a0) @@ -1528,9 +1528,9 @@ ; LMULMAX4-LABEL: truncstore_v4i64_v4i16: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v10, v8, zero +; LMULMAX4-NEXT: vncvt.x.x.w v10, v8 ; LMULMAX4-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v8, v10, zero +; LMULMAX4-NEXT: vncvt.x.x.w v8, v10 ; LMULMAX4-NEXT: vse16.v v8, (a0) ; LMULMAX4-NEXT: ret %y = trunc <4 x i64> %x to <4 x i16> @@ -1542,8 +1542,8 @@ ; LMULMAX1-LABEL: truncstore_v4i64_v4i32: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vse32.v v8, (a0) @@ -1552,7 +1552,7 @@ ; LMULMAX4-LABEL: truncstore_v4i64_v4i32: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v10, v8, zero +; LMULMAX4-NEXT: vncvt.x.x.w v10, v8 ; LMULMAX4-NEXT: vse32.v v10, (a0) ; LMULMAX4-NEXT: ret %y = trunc <4 x i64> %x to <4 x i32> @@ -1564,33 +1564,33 @@ ; LMULMAX1-LABEL: truncstore_v8i64_v8i8: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v10, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; 
LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 ; LMULMAX1-NEXT: vse8.v v8, (a0) @@ -1599,11 +1599,11 @@ ; LMULMAX4-LABEL: truncstore_v8i64_v8i8: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v12, v8, zero +; LMULMAX4-NEXT: vncvt.x.x.w v12, v8 ; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v8, v12, zero +; LMULMAX4-NEXT: vncvt.x.x.w v8, v12 ; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX4-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX4-NEXT: vse8.v v8, (a0) ; LMULMAX4-NEXT: ret %y = trunc <8 x i64> %x to <8 x i8> @@ -1615,25 +1615,25 @@ ; LMULMAX1-LABEL: truncstore_v8i64_v8i16: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v10, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 ; LMULMAX1-NEXT: vse16.v v8, (a0) @@ -1642,9 +1642,9 @@ ; LMULMAX4-LABEL: truncstore_v8i64_v8i16: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v12, v8, zero +; LMULMAX4-NEXT: vncvt.x.x.w v12, v8 ; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v8, v12, zero +; LMULMAX4-NEXT: vncvt.x.x.w v8, v12 ; LMULMAX4-NEXT: vse16.v v8, (a0) ; LMULMAX4-NEXT: ret %y = trunc <8 x i64> %x to <8 x i16> @@ -1656,13 +1656,13 @@ ; LMULMAX1-LABEL: truncstore_v8i64_v8i32: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero -; LMULMAX1-NEXT: vnsrl.wx v10, v10, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 +; LMULMAX1-NEXT: vncvt.x.x.w v10, v10 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 2 ; LMULMAX1-NEXT: addi a1, a0, 16 @@ -1673,7 +1673,7 @@ ; LMULMAX4-LABEL: truncstore_v8i64_v8i32: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v12, v8, zero +; LMULMAX4-NEXT: vncvt.x.x.w v12, v8 ; LMULMAX4-NEXT: vse32.v v12, (a0) ; LMULMAX4-NEXT: ret 
%y = trunc <8 x i64> %x to <8 x i32> @@ -1685,65 +1685,65 @@ ; LMULMAX1-LABEL: truncstore_v16i64_v16i8: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetivli zero, 4, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v10, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetivli zero, 6, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v12, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v12 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetivli zero, 10, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 8 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v13, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v13 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 10 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v14, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v14 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetivli zero, 14, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 12 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v15, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v15 ; LMULMAX1-NEXT: 
vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 14 ; LMULMAX1-NEXT: vse8.v v8, (a0) @@ -1752,17 +1752,17 @@ ; LMULMAX4-LABEL: truncstore_v16i64_v16i8: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v16, v12, zero +; LMULMAX4-NEXT: vncvt.x.x.w v16, v12 ; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v12, v16, zero +; LMULMAX4-NEXT: vncvt.x.x.w v12, v16 ; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v12, v12, zero +; LMULMAX4-NEXT: vncvt.x.x.w v12, v12 ; LMULMAX4-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v14, v8, zero +; LMULMAX4-NEXT: vncvt.x.x.w v14, v8 ; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v8, v14, zero +; LMULMAX4-NEXT: vncvt.x.x.w v8, v14 ; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX4-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, tu, mu ; LMULMAX4-NEXT: vslideup.vi v8, v12, 8 ; LMULMAX4-NEXT: vse8.v v8, (a0) @@ -1776,47 +1776,47 @@ ; LMULMAX1-LABEL: truncstore_v16i64_v16i16: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v10, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v13, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v13 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v10, v12, zero +; LMULMAX1-NEXT: vncvt.x.x.w v10, v12 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v10, v10, zero +; LMULMAX1-NEXT: vncvt.x.x.w v10, v10 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v14, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v14 ; LMULMAX1-NEXT: 
vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v15, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v15 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 6 ; LMULMAX1-NEXT: addi a1, a0, 16 @@ -1827,13 +1827,13 @@ ; LMULMAX4-LABEL: truncstore_v16i64_v16i16: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v16, v12, zero +; LMULMAX4-NEXT: vncvt.x.x.w v16, v12 ; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v12, v16, zero +; LMULMAX4-NEXT: vncvt.x.x.w v12, v16 ; LMULMAX4-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v14, v8, zero +; LMULMAX4-NEXT: vncvt.x.x.w v14, v8 ; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v8, v14, zero +; LMULMAX4-NEXT: vncvt.x.x.w v8, v14 ; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, tu, mu ; LMULMAX4-NEXT: vslideup.vi v8, v12, 8 ; LMULMAX4-NEXT: vse16.v v8, (a0) @@ -1847,23 +1847,23 @@ ; LMULMAX1-LABEL: truncstore_v16i64_v16i32: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero -; LMULMAX1-NEXT: vnsrl.wx v10, v10, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 +; LMULMAX1-NEXT: vncvt.x.x.w v10, v10 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v13, zero -; LMULMAX1-NEXT: vnsrl.wx v11, v12, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v13 +; LMULMAX1-NEXT: vncvt.x.x.w v11, v12 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v11, v9, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v15, zero -; LMULMAX1-NEXT: vnsrl.wx v12, v14, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v15 +; LMULMAX1-NEXT: vncvt.x.x.w v12, v14 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v12, v9, 2 ; LMULMAX1-NEXT: addi a1, a0, 48 @@ -1878,8 +1878,8 @@ ; LMULMAX4-LABEL: truncstore_v16i64_v16i32: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; LMULMAX4-NEXT: vnsrl.wx v16, v12, zero -; LMULMAX4-NEXT: vnsrl.wx v12, v8, zero +; LMULMAX4-NEXT: vncvt.x.x.w v16, v12 +; LMULMAX4-NEXT: vncvt.x.x.w v12, v8 ; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, tu, mu ; LMULMAX4-NEXT: vslideup.vi v12, v16, 8 ; LMULMAX4-NEXT: vse32.v v12, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll @@ -201,7 +201,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret %a = load <8 
x half>, <8 x half>* %x @@ -215,7 +215,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x @@ -229,7 +229,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x @@ -243,7 +243,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x @@ -258,7 +258,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x @@ -273,7 +273,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x @@ -1191,7 +1191,7 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; LMULMAX2-NEXT: vle16.v v8, (a0) -; LMULMAX2-NEXT: vfsgnjn.vv v8, v8, v8 +; LMULMAX2-NEXT: vfneg.v v8, v8 ; LMULMAX2-NEXT: vse16.v v8, (a0) ; LMULMAX2-NEXT: ret ; @@ -1201,8 +1201,8 @@ ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vle16.v v8, (a1) ; LMULMAX1-NEXT: vle16.v v9, (a0) -; LMULMAX1-NEXT: vfsgnjn.vv v8, v8, v8 -; LMULMAX1-NEXT: vfsgnjn.vv v9, v9, v9 +; LMULMAX1-NEXT: vfneg.v v8, v8 +; LMULMAX1-NEXT: vfneg.v v9, v9 ; LMULMAX1-NEXT: vse16.v v9, (a0) ; LMULMAX1-NEXT: vse16.v v8, (a1) ; LMULMAX1-NEXT: ret @@ -1217,7 +1217,7 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vle32.v v8, (a0) -; LMULMAX2-NEXT: vfsgnjn.vv v8, v8, v8 +; LMULMAX2-NEXT: vfneg.v v8, v8 ; LMULMAX2-NEXT: vse32.v v8, (a0) ; LMULMAX2-NEXT: ret ; @@ -1227,8 +1227,8 @@ ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vle32.v v8, (a1) ; LMULMAX1-NEXT: vle32.v v9, (a0) -; LMULMAX1-NEXT: vfsgnjn.vv v8, v8, v8 -; LMULMAX1-NEXT: vfsgnjn.vv v9, v9, v9 +; LMULMAX1-NEXT: vfneg.v v8, v8 +; LMULMAX1-NEXT: vfneg.v v9, v9 ; LMULMAX1-NEXT: vse32.v v9, (a0) ; LMULMAX1-NEXT: vse32.v v8, (a1) ; LMULMAX1-NEXT: ret @@ -1243,7 +1243,7 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-NEXT: vle64.v v8, (a0) -; LMULMAX2-NEXT: vfsgnjn.vv v8, v8, v8 +; LMULMAX2-NEXT: vfneg.v v8, v8 ; LMULMAX2-NEXT: vse64.v v8, (a0) ; LMULMAX2-NEXT: ret ; @@ -1253,8 +1253,8 @@ ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vle64.v v8, (a1) ; LMULMAX1-NEXT: vle64.v v9, (a0) -; LMULMAX1-NEXT: vfsgnjn.vv v8, v8, v8 -; LMULMAX1-NEXT: vfsgnjn.vv v9, v9, v9 +; LMULMAX1-NEXT: vfneg.v v8, v8 +; LMULMAX1-NEXT: vfneg.v v9, v9 ; LMULMAX1-NEXT: vse64.v v9, (a0) ; LMULMAX1-NEXT: vse64.v v8, (a1) ; LMULMAX1-NEXT: ret @@ -1971,7 +1971,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a1, %hi(.LCPI91_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI91_0)(a1) -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v9, v9 @@ -1993,7 
+1993,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a1, %hi(.LCPI92_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI92_0)(a1) -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v9, v9 @@ -2015,7 +2015,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: lui a1, %hi(.LCPI93_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI93_0)(a1) -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v9, v9 @@ -2044,7 +2044,7 @@ ; CHECK-NEXT: flh ft1, %lo(.LCPI94_1)(a1) ; CHECK-NEXT: vfadd.vf v10, v9, ft0 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0 -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft1 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 @@ -2071,7 +2071,7 @@ ; CHECK-NEXT: flw ft1, %lo(.LCPI95_1)(a1) ; CHECK-NEXT: vfadd.vf v10, v9, ft0 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0 -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft1 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 @@ -2098,7 +2098,7 @@ ; CHECK-NEXT: fld ft1, %lo(.LCPI96_1)(a1) ; CHECK-NEXT: vfadd.vf v10, v9, ft0 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0 -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft1 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 @@ -2125,7 +2125,7 @@ ; CHECK-NEXT: flh ft1, %lo(.LCPI97_1)(a1) ; CHECK-NEXT: vfsub.vf v10, v9, ft0 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0 -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft1 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 @@ -2152,7 +2152,7 @@ ; CHECK-NEXT: flw ft1, %lo(.LCPI98_1)(a1) ; CHECK-NEXT: vfsub.vf v10, v9, ft0 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0 -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft1 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 @@ -2179,7 +2179,7 @@ ; CHECK-NEXT: fld ft1, %lo(.LCPI99_1)(a1) ; CHECK-NEXT: vfsub.vf v10, v9, ft0 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0 -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft1 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 @@ -2201,7 +2201,7 @@ ; CHECK-NEXT: flh ft0, %lo(.LCPI100_0)(a1) ; CHECK-NEXT: lui a1, %hi(.LCPI100_1) ; CHECK-NEXT: flh ft1, %lo(.LCPI100_1)(a1) -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfadd.vf v9, v9, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9 @@ -2226,7 +2226,7 @@ ; CHECK-NEXT: flw ft0, %lo(.LCPI101_0)(a1) ; CHECK-NEXT: lui a1, %hi(.LCPI101_1) ; CHECK-NEXT: flw ft1, %lo(.LCPI101_1)(a1) -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfadd.vf v9, v9, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9 @@ -2251,7 +2251,7 @@ ; CHECK-NEXT: fld ft0, %lo(.LCPI102_0)(a1) ; CHECK-NEXT: lui a1, %hi(.LCPI102_1) ; CHECK-NEXT: fld ft1, %lo(.LCPI102_1)(a1) -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfadd.vf v9, v9, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll @@ -348,9 +348,9 @@ ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v9, zero +; CHECK-NEXT: vncvt.x.x.w v8, v9 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x @@ -367,9 +367,9 @@ ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v9, zero +; CHECK-NEXT: vncvt.x.x.w v8, v9 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x @@ -410,9 +410,9 @@ ; LMULMAX8-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; LMULMAX8-NEXT: vfncvt.rtz.x.f.w v12, v8 ; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; LMULMAX8-NEXT: vnsrl.wx v8, v12, zero +; LMULMAX8-NEXT: vncvt.x.x.w v8, v12 ; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; LMULMAX8-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX8-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX8-NEXT: vse8.v v8, (a1) ; LMULMAX8-NEXT: ret ; @@ -429,31 +429,31 @@ ; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v12, v10 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v10, v12, zero +; LMULMAX1-NEXT: vncvt.x.x.w v10, v12 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v10, v10, zero +; LMULMAX1-NEXT: vncvt.x.x.w v10, v10 ; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v12, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v11, v12, zero +; LMULMAX1-NEXT: vncvt.x.x.w v11, v12 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v11, v11, zero +; LMULMAX1-NEXT: vncvt.x.x.w v11, v11 ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v11, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v11, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 ; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v9, v8 ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v9, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v9 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero +; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v8, 6 ; LMULMAX1-NEXT: vse8.v v10, (a1) @@ -472,9 +472,9 @@ ; LMULMAX8-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; LMULMAX8-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; LMULMAX8-NEXT: vnsrl.wx v8, v12, zero +; LMULMAX8-NEXT: vncvt.x.x.w v8, v12 ; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, mu 
-; LMULMAX8-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX8-NEXT: vncvt.x.x.w v8, v8
; LMULMAX8-NEXT: vse8.v v8, (a1)
; LMULMAX8-NEXT: ret
;
@@ -491,31 +491,31 @@
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v12, v10
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v10, v12, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v10, v12
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v10, v10, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v10, v10
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v12, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v11, v12, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v11, v12
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v11, v11, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v11, v11
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v11, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v11, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v9, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v8, 6
; LMULMAX1-NEXT: vse8.v v10, (a1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
@@ -169,9 +169,9 @@
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vse8.v v8, (a1)
; CHECK-NEXT: ret
%a = load <4 x i32>, <4 x i32>* %x
@@ -186,9 +186,9 @@
; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX8-NEXT: vle32.v v8, (a0)
; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX8-NEXT: vnsrl.wx v10, v8, zero
+; LMULMAX8-NEXT: vncvt.x.x.w v10, v8
; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX8-NEXT: vnsrl.wx v8, v10, zero
+; LMULMAX8-NEXT: vncvt.x.x.w v8, v10
; LMULMAX8-NEXT: vse8.v v8, (a1)
; LMULMAX8-NEXT: ret
;
@@ -197,9 +197,9 @@
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-NEXT: vle32.v v8, (a0)
; LMULMAX2-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX2-NEXT: vnsrl.wx v10, v8, zero
+; LMULMAX2-NEXT: vncvt.x.x.w v10, v8
; LMULMAX2-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX2-NEXT: vnsrl.wx v8, v10, zero
+; LMULMAX2-NEXT: vncvt.x.x.w v8, v10
; LMULMAX2-NEXT: vse8.v v8, (a1)
; LMULMAX2-NEXT: ret
;
@@ -210,13 +210,13 @@
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vle32.v v9, (a0)
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vse8.v v8, (a1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -6493,7 +6493,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%a = load <16 x i8>, <16 x i8>* %x
@@ -6509,7 +6509,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = load <8 x i16>, <8 x i16>* %x
@@ -6525,7 +6525,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = load <4 x i32>, <4 x i32>* %x
@@ -6541,7 +6541,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
%a = load <2 x i64>, <2 x i64>* %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -1037,7 +1037,7 @@
; RV32-NEXT: vsext.vf8 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1064,7 +1064,7 @@
; RV32-NEXT: vzext.vf8 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1115,7 +1115,7 @@
; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1142,7 +1142,7 @@
; RV32-NEXT: vzext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1192,7 +1192,7 @@
; RV32-NEXT: vsext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1219,7 +1219,7 @@
; RV32-NEXT: vzext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1245,7 +1245,7 @@
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1907,7 +1907,7 @@
; RV32-NEXT: vsext.vf8 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1934,7 +1934,7 @@
; RV32-NEXT: vzext.vf8 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1985,7 +1985,7 @@
; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -2012,7 +2012,7 @@
; RV32-NEXT: vzext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -2062,7 +2062,7 @@
; RV32-NEXT: vsext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -2089,7 +2089,7 @@
; RV32-NEXT: vzext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -2115,7 +2115,7 @@
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -44,14 +44,14 @@
; RV32-LABEL: mscatter_v2i16_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_v2i16_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
%tval = trunc <2 x i16> %val to <2 x i8>
@@ -63,18 +63,18 @@
; RV32-LABEL: mscatter_v2i32_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_v2i32_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
%tval = trunc <2 x i32> %val to <2 x i8>
@@ -86,22 +86,22 @@
; RV32-LABEL: mscatter_v2i64_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_v2i64_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
%tval = trunc <2 x i64> %val to <2 x i8>
@@ -236,14 +236,14 @@
; RV32-LABEL: mscatter_v2i32_truncstore_v2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_v2i32_truncstore_v2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
%tval = trunc <2 x i32> %val to <2 x i16>
@@ -255,18 +255,18 @@
; RV32-LABEL: mscatter_v2i64_truncstore_v2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_v2i64_truncstore_v2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
%tval = trunc <2 x i64> %val to <2 x i16>
@@ -474,14 +474,14 @@
; RV32-LABEL: mscatter_v2i64_truncstore_v2i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_v2i64_truncstore_v2i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
%tval = trunc <2 x i64> %val to <2 x i32>
@@ -843,7 +843,7 @@
; RV32-NEXT: vsext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -868,7 +868,7 @@
; RV32-NEXT: vzext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -915,7 +915,7 @@
; RV32-NEXT: vsext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -940,7 +940,7 @@
; RV32-NEXT: vzext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -986,7 +986,7 @@
; RV32-NEXT: vsext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1011,7 +1011,7 @@
; RV32-NEXT: vzext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1035,7 +1035,7 @@
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v12, v12, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1637,7 +1637,7 @@
; RV32-NEXT: vsext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1662,7 +1662,7 @@
; RV32-NEXT: vzext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1709,7 +1709,7 @@
; RV32-NEXT: vsext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1734,7 +1734,7 @@
; RV32-NEXT: vzext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1780,7 +1780,7 @@
; RV32-NEXT: vsext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1805,7 +1805,7 @@
; RV32-NEXT: vzext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1829,7 +1829,7 @@
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v12, v12, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
@@ -20,7 +20,7 @@
; CHECK-LABEL: vfneg_vv_v2f16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
@@ -44,7 +44,7 @@
; CHECK-LABEL: vfneg_vv_v4f16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
@@ -68,7 +68,7 @@
; CHECK-LABEL: vfneg_vv_v8f16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
@@ -92,7 +92,7 @@
; CHECK-LABEL: vfneg_vv_v16f16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
@@ -116,7 +116,7 @@
; CHECK-LABEL: vfneg_vv_v2f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
@@ -140,7 +140,7 @@
; CHECK-LABEL: vfneg_vv_v4f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
@@ -164,7 +164,7 @@
; CHECK-LABEL: vfneg_vv_v8f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
@@ -188,7 +188,7 @@
; CHECK-LABEL: vfneg_vv_v16f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
@@ -212,7 +212,7 @@
; CHECK-LABEL: vfneg_vv_v2f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
@@ -236,7 +236,7 @@
; CHECK-LABEL: vfneg_vv_v4f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
@@ -260,7 +260,7 @@
; CHECK-LABEL: vfneg_vv_v8f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
@@ -284,7 +284,7 @@
; CHECK-LABEL: vfneg_vv_v15f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <15 x i1> poison, i1 true, i32 0
%m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
@@ -308,7 +308,7 @@
; CHECK-LABEL: vfneg_vv_v16f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
@@ -356,13 +356,13 @@
; CHECK-NEXT: .LBB27_2:
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: vfsgnjn.vv v16, v16, v16
+; CHECK-NEXT: vfneg.v v16, v16
; CHECK-NEXT: bltu a0, a1, .LBB27_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB27_4:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <32 x i1> poison, i1 true, i32 0
%m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -965,7 +965,7 @@
; RV32-NEXT: vsext.vf8 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -991,7 +991,7 @@
; RV32-NEXT: vzext.vf8 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1040,7 +1040,7 @@
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1066,7 +1066,7 @@
; RV32-NEXT: vzext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1114,7 +1114,7 @@
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1140,7 +1140,7 @@
; RV32-NEXT: vzext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1165,7 +1165,7 @@
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1702,7 +1702,7 @@
; RV32-NEXT: vsext.vf8 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1728,7 +1728,7 @@
; RV32-NEXT: vzext.vf8 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1777,7 +1777,7 @@
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1803,7 +1803,7 @@
; RV32-NEXT: vzext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1851,7 +1851,7 @@
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1877,7 +1877,7 @@
; RV32-NEXT: vzext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1902,7 +1902,7 @@
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -2055,7 +2055,7 @@
; RV32-NEXT: vsext.vf8 v24, v8
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v16, zero
+; RV32-NEXT: vncvt.x.x.w v12, v16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v10, 2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
@@ -2068,7 +2068,7 @@
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v24, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v4, v24, zero
+; RV32-NEXT: vncvt.x.x.w v4, v24
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v10
; RV32-NEXT: vluxei32.v v8, (a0), v4, v0.t
@@ -2128,7 +2128,7 @@
; RV32-NEXT: vzext.vf8 v24, v8
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v16, zero
+; RV32-NEXT: vncvt.x.x.w v12, v16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v10, 2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
@@ -2141,7 +2141,7 @@
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v24, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v4, v24, zero
+; RV32-NEXT: vncvt.x.x.w v4, v24
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v10
; RV32-NEXT: vluxei32.v v8, (a0), v4, v0.t
@@ -2265,7 +2265,7 @@
; RV32-NEXT: vsext.vf4 v24, v8
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v16, zero
+; RV32-NEXT: vncvt.x.x.w v8, v16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v12, 2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
@@ -2278,7 +2278,7 @@
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v24, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v4, v24, zero
+; RV32-NEXT: vncvt.x.x.w v4, v24
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vluxei32.v v8, (a0), v4, v0.t
@@ -2338,7 +2338,7 @@
; RV32-NEXT: vzext.vf4 v24, v8
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v16, zero
+; RV32-NEXT: vncvt.x.x.w v8, v16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v12, 2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
@@ -2351,7 +2351,7 @@
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v24, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v4, v24, zero
+; RV32-NEXT: vncvt.x.x.w v4, v24
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vluxei32.v v8, (a0), v4, v0.t
@@ -2474,7 +2474,7 @@
; RV32-NEXT: vsext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v4, v8, zero
+; RV32-NEXT: vncvt.x.x.w v4, v8
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v1, 2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
@@ -2487,7 +2487,7 @@
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
@@ -2547,7 +2547,7 @@
; RV32-NEXT: vzext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v4, v8, zero
+; RV32-NEXT: vncvt.x.x.w v4, v8
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v1, 2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
@@ -2560,7 +2560,7 @@
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
@@ -2616,7 +2616,7 @@
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v28, v16, zero
+; RV32-NEXT: vncvt.x.x.w v28, v16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v24, 2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
@@ -2629,7 +2629,7 @@
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v28, v8, zero
+; RV32-NEXT: vncvt.x.x.w v28, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v24
; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
@@ -26,7 +26,7 @@
; RV32-LABEL: vpscatter_v2i16_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
@@ -34,7 +34,7 @@
; RV64-LABEL: vpscatter_v2i16_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -47,9 +47,9 @@
; RV32-LABEL: vpscatter_v2i32_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
@@ -57,9 +57,9 @@
; RV64-LABEL: vpscatter_v2i32_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -72,11 +72,11 @@
; RV32-LABEL: vpscatter_v2i64_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
@@ -84,11 +84,11 @@
; RV64-LABEL: vpscatter_v2i64_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -194,7 +194,7 @@
; RV32-LABEL: vpscatter_v2i32_truncstore_v2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
@@ -202,7 +202,7 @@
; RV64-LABEL: vpscatter_v2i32_truncstore_v2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -215,9 +215,9 @@
; RV32-LABEL: vpscatter_v2i64_truncstore_v2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
@@ -225,9 +225,9 @@
; RV64-LABEL: vpscatter_v2i64_truncstore_v2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -442,7 +442,7 @@
; RV32-LABEL: vpscatter_v2i64_truncstore_v2i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
@@ -450,7 +450,7 @@
; RV64-LABEL: vpscatter_v2i64_truncstore_v2i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -779,7 +779,7 @@
; RV32-NEXT: vsext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -805,7 +805,7 @@
; RV32-NEXT: vzext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -854,7 +854,7 @@
; RV32-NEXT: vsext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -880,7 +880,7 @@
; RV32-NEXT: vzext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -928,7 +928,7 @@
; RV32-NEXT: vsext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -954,7 +954,7 @@
; RV32-NEXT: vzext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -979,7 +979,7 @@
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v12, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1500,7 +1500,7 @@
; RV32-NEXT: vsext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1526,7 +1526,7 @@
; RV32-NEXT: vzext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1575,7 +1575,7 @@
; RV32-NEXT: vsext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1601,7 +1601,7 @@
; RV32-NEXT: vzext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1649,7 +1649,7 @@
; RV32-NEXT: vsext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1675,7 +1675,7 @@
; RV32-NEXT: vzext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1700,7 +1700,7 @@
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v12, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1924,7 +1924,7 @@
; RV32-NEXT: vsext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: addi a1, a2, -16
; RV32-NEXT: addi a4, sp, 16
@@ -1937,7 +1937,7 @@
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu
@@ -2058,7 +2058,7 @@
; RV32-NEXT: vzext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: addi a1, a2, -16
; RV32-NEXT: addi a4, sp, 16
@@ -2071,7 +2071,7 @@
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll
@@ -120,7 +120,7 @@
; CHECK-LABEL: vxor_vi_v2i8_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
%vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
@@ -222,7 +222,7 @@
; CHECK-LABEL: vxor_vi_v4i8_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
%vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
@@ -324,7 +324,7 @@
; CHECK-LABEL: vxor_vi_v8i8_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
%vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
@@ -426,7 +426,7 @@
; CHECK-LABEL: vxor_vi_v9i8_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <9 x i8> poison, i8 -1, i32 0 %vb = shufflevector <9 x i8> %elt.head, <9 x i8> poison, <9 x i32> zeroinitializer @@ -528,7 +528,7 @@ ; CHECK-LABEL: vxor_vi_v16i8_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer @@ -630,7 +630,7 @@ ; CHECK-LABEL: vxor_vi_v2i16_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer @@ -732,7 +732,7 @@ ; CHECK-LABEL: vxor_vi_v4i16_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer @@ -834,7 +834,7 @@ ; CHECK-LABEL: vxor_vi_v8i16_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer @@ -936,7 +936,7 @@ ; CHECK-LABEL: vxor_vi_v16i16_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer @@ -1038,7 +1038,7 @@ ; CHECK-LABEL: vxor_vi_v2i32_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer @@ -1140,7 +1140,7 @@ ; CHECK-LABEL: vxor_vi_v4i32_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer @@ -1242,7 +1242,7 @@ ; CHECK-LABEL: vxor_vi_v8i32_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer @@ -1344,7 +1344,7 @@ ; CHECK-LABEL: vxor_vi_v16i32_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer @@ -1474,7 +1474,7 @@ ; CHECK-LABEL: vxor_vi_v2i64_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, 
v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer @@ -1604,7 +1604,7 @@ ; CHECK-LABEL: vxor_vi_v4i64_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer @@ -1734,7 +1734,7 @@ ; CHECK-LABEL: vxor_vi_v8i64_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer @@ -1864,7 +1864,7 @@ ; CHECK-LABEL: vxor_vi_v16i64_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll @@ -12,7 +12,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI0_1) ; CHECK-NEXT: flh ft1, %lo(.LCPI0_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfadd.vf v9, v9, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9 @@ -33,7 +33,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI1_1) ; CHECK-NEXT: flh ft1, %lo(.LCPI1_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfadd.vf v9, v9, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9 @@ -54,7 +54,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI2_1) ; CHECK-NEXT: flh ft1, %lo(.LCPI2_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfadd.vf v9, v9, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9 @@ -75,7 +75,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI3_1) ; CHECK-NEXT: flh ft1, %lo(.LCPI3_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: vfadd.vf v10, v10, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v10 @@ -96,7 +96,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI4_1) ; CHECK-NEXT: flh ft1, %lo(.LCPI4_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v12, v8, v8 +; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: vfadd.vf v12, v12, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v12 @@ -117,7 +117,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI5_1) ; CHECK-NEXT: flh ft1, %lo(.LCPI5_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v16, v8, v8 +; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: vfadd.vf v16, v16, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16 @@ -138,7 +138,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI6_1) ; CHECK-NEXT: flw ft1, %lo(.LCPI6_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu -; 
CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfadd.vf v9, v9, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9 @@ -159,7 +159,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI7_1) ; CHECK-NEXT: flw ft1, %lo(.LCPI7_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfadd.vf v9, v9, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9 @@ -180,7 +180,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI8_1) ; CHECK-NEXT: flw ft1, %lo(.LCPI8_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: vfadd.vf v10, v10, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v10 @@ -201,7 +201,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI9_1) ; CHECK-NEXT: flw ft1, %lo(.LCPI9_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v12, v8, v8 +; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: vfadd.vf v12, v12, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v12 @@ -222,7 +222,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI10_1) ; CHECK-NEXT: flw ft1, %lo(.LCPI10_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v16, v8, v8 +; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: vfadd.vf v16, v16, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16 @@ -243,7 +243,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI11_1) ; CHECK-NEXT: fld ft1, %lo(.LCPI11_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfadd.vf v9, v9, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9 @@ -264,7 +264,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI12_1) ; CHECK-NEXT: fld ft1, %lo(.LCPI12_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: vfadd.vf v10, v10, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v10 @@ -285,7 +285,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI13_1) ; CHECK-NEXT: fld ft1, %lo(.LCPI13_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v12, v8, v8 +; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: vfadd.vf v12, v12, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v12 @@ -306,7 +306,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI14_1) ; CHECK-NEXT: fld ft1, %lo(.LCPI14_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v16, v8, v8 +; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: vfadd.vf v16, v16, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll @@ -10,7 +10,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI0_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI0_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v9, v9 @@ -28,7 +28,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI1_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; 
CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v9, v9 @@ -46,7 +46,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI2_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI2_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v9, v9 @@ -64,7 +64,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI3_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8 ; CHECK-NEXT: vfcvt.f.x.v v10, v10 @@ -82,7 +82,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI4_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI4_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v12, v8, v8 +; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8 ; CHECK-NEXT: vfcvt.f.x.v v12, v12 @@ -100,7 +100,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI5_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v16, v8, v8 +; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8 ; CHECK-NEXT: vfcvt.f.x.v v16, v16 @@ -118,7 +118,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI6_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v9, v9 @@ -136,7 +136,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI7_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI7_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v9, v9 @@ -154,7 +154,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI8_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8 ; CHECK-NEXT: vfcvt.f.x.v v10, v10 @@ -172,7 +172,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI9_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v12, v8, v8 +; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8 ; CHECK-NEXT: vfcvt.f.x.v v12, v12 @@ -190,7 +190,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI10_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v16, v8, v8 +; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8 ; CHECK-NEXT: vfcvt.f.x.v v16, v16 @@ -208,7 +208,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI11_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI11_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v9, v9 @@ -226,7 +226,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI12_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI12_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v 
v10, v8 ; CHECK-NEXT: vfcvt.f.x.v v10, v10 @@ -244,7 +244,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI13_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI13_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v12, v8, v8 +; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8 ; CHECK-NEXT: vfcvt.f.x.v v12, v12 @@ -262,7 +262,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI14_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI14_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v16, v8, v8 +; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8 ; CHECK-NEXT: vfcvt.f.x.v v16, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll b/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll --- a/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll +++ b/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll @@ -6,9 +6,9 @@ ; CHECK-LABEL: trunc_nxv4i32_to_nxv4i5: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v10, v8, zero +; CHECK-NEXT: vncvt.x.x.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v10, zero +; CHECK-NEXT: vncvt.x.x.w v8, v10 ; CHECK-NEXT: ret %v = trunc %a to ret %v @@ -18,9 +18,9 @@ ; CHECK-LABEL: trunc_nxv1i32_to_nxv1i5: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: ret %v = trunc %a to ret %v diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll @@ -1043,7 +1043,7 @@ ; RV32-NEXT: vsext.vf8 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1070,7 +1070,7 @@ ; RV32-NEXT: vzext.vf8 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1121,7 +1121,7 @@ ; RV32-NEXT: vsext.vf4 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1148,7 +1148,7 @@ ; RV32-NEXT: vzext.vf4 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1198,7 +1198,7 @@ ; RV32-NEXT: vsext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1225,7 +1225,7 @@ ; 
 ; RV32-NEXT: vzext.vf2 v24, v8
 ; RV32-NEXT: vsll.vi v8, v24, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT: vmv.v.v v8, v16
@@ -1251,7 +1251,7 @@
 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsll.vi v8, v8, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT: vmv.v.v v8, v16
@@ -1979,7 +1979,7 @@
 ; RV32-NEXT: vsext.vf8 v24, v8
 ; RV32-NEXT: vsll.vi v8, v24, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT: vmv.v.v v8, v16
@@ -2006,7 +2006,7 @@
 ; RV32-NEXT: vzext.vf8 v24, v8
 ; RV32-NEXT: vsll.vi v8, v24, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT: vmv.v.v v8, v16
@@ -2057,7 +2057,7 @@
 ; RV32-NEXT: vsext.vf4 v24, v8
 ; RV32-NEXT: vsll.vi v8, v24, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT: vmv.v.v v8, v16
@@ -2084,7 +2084,7 @@
 ; RV32-NEXT: vzext.vf4 v24, v8
 ; RV32-NEXT: vsll.vi v8, v24, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT: vmv.v.v v8, v16
@@ -2134,7 +2134,7 @@
 ; RV32-NEXT: vsext.vf2 v24, v8
 ; RV32-NEXT: vsll.vi v8, v24, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT: vmv.v.v v8, v16
@@ -2161,7 +2161,7 @@
 ; RV32-NEXT: vzext.vf2 v24, v8
 ; RV32-NEXT: vsll.vi v8, v24, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT: vmv.v.v v8, v16
@@ -2187,7 +2187,7 @@
 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsll.vi v8, v8, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT: vmv.v.v v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
@@ -44,14 +44,14 @@
 ; RV32-LABEL: mscatter_nxv2i16_truncstore_nxv2i8:
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: mscatter_nxv2i16_truncstore_nxv2i8:
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT: ret
  %tval = trunc <vscale x 2 x i16> %val to <vscale x 2 x i8>
@@ -63,18 +63,18 @@
 ; RV32-LABEL: mscatter_nxv2i32_truncstore_nxv2i8:
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
 ; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i8:
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
 ; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT: ret
  %tval = trunc <vscale x 2 x i32> %val to <vscale x 2 x i8>
@@ -86,22 +86,22 @@
 ; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i8:
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; RV32-NEXT: vnsrl.wx v11, v8, zero
+; RV32-NEXT: vncvt.x.x.w v11, v8
 ; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v11, zero
+; RV32-NEXT: vncvt.x.x.w v8, v11
 ; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
 ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i8:
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; RV64-NEXT: vnsrl.wx v12, v8, zero
+; RV64-NEXT: vncvt.x.x.w v12, v8
 ; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v12, zero
+; RV64-NEXT: vncvt.x.x.w v8, v12
 ; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT: ret
  %tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i8>
@@ -236,14 +236,14 @@
 ; RV32-LABEL: mscatter_nxv2i32_truncstore_nxv2i16:
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i16:
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT: ret
  %tval = trunc <vscale x 2 x i32> %val to <vscale x 2 x i16>
@@ -255,18 +255,18 @@
 ; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i16:
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; RV32-NEXT: vnsrl.wx v11, v8, zero
+; RV32-NEXT: vncvt.x.x.w v11, v8
 ; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v11, zero
+; RV32-NEXT: vncvt.x.x.w v8, v11
 ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i16:
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; RV64-NEXT: vnsrl.wx v12, v8, zero
+; RV64-NEXT: vncvt.x.x.w v12, v8
 ; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v12, zero
+; RV64-NEXT: vncvt.x.x.w v8, v12
 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT: ret
  %tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i16>
@@ -474,14 +474,14 @@
 ; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i32:
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; RV32-NEXT: vnsrl.wx v11, v8, zero
+; RV32-NEXT: vncvt.x.x.w v11, v8
 ; RV32-NEXT: vsoxei32.v v11, (zero), v10, v0.t
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i32:
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; RV64-NEXT: vnsrl.wx v12, v8, zero
+; RV64-NEXT: vncvt.x.x.w v12, v8
 ; RV64-NEXT: vsoxei64.v v12, (zero), v10, v0.t
 ; RV64-NEXT: ret
  %tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i32>
@@ -843,7 +843,7 @@
 ; RV32-NEXT: vsext.vf8 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -868,7 +868,7 @@
 ; RV32-NEXT: vzext.vf8 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -915,7 +915,7 @@
 ; RV32-NEXT: vsext.vf4 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -940,7 +940,7 @@
 ; RV32-NEXT: vzext.vf4 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -986,7 +986,7 @@
 ; RV32-NEXT: vsext.vf2 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1011,7 +1011,7 @@
 ; RV32-NEXT: vzext.vf2 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1035,7 +1035,7 @@
 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsll.vi v16, v16, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1637,7 +1637,7 @@
 ; RV32-NEXT: vsext.vf8 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1662,7 +1662,7 @@
 ; RV32-NEXT: vzext.vf8 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1709,7 +1709,7 @@
 ; RV32-NEXT: vsext.vf4 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1734,7 +1734,7 @@
 ; RV32-NEXT: vzext.vf4 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1780,7 +1780,7 @@
 ; RV32-NEXT: vsext.vf2 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1805,7 +1805,7 @@
 ; RV32-NEXT: vzext.vf2 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1829,7 +1829,7 @@
 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsll.vi v16, v16, 3
 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll
@@ -10,7 +10,7 @@
 ; CHECK-LABEL: vfabs_nxv1f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT: vfabs.v v8, v8
 ; CHECK-NEXT: ret
  %r = call <vscale x 1 x half> @llvm.fabs.nxv1f16(<vscale x 1 x half> %v)
  ret <vscale x 1 x half> %r
@@ -22,7 +22,7 @@
 ; CHECK-LABEL: vfabs_nxv2f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT: vfabs.v v8, v8
 ; CHECK-NEXT: ret
  %r = call <vscale x 2 x half> @llvm.fabs.nxv2f16(<vscale x 2 x half> %v)
  ret <vscale x 2 x half> %r
@@ -34,7 +34,7 @@
 ; CHECK-LABEL: vfabs_nxv4f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT: vfabs.v v8, v8
 ; CHECK-NEXT: ret
  %r = call <vscale x 4 x half> @llvm.fabs.nxv4f16(<vscale x 4 x half> %v)
  ret <vscale x 4 x half> %r
@@ -46,7 +46,7 @@
 ; CHECK-LABEL: vfabs_nxv8f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT: vfabs.v v8, v8
 ; CHECK-NEXT: ret
  %r = call <vscale x 8 x half> @llvm.fabs.nxv8f16(<vscale x 8 x half> %v)
  ret <vscale x 8 x half> %r
@@ -58,7 +58,7 @@
 ; CHECK-LABEL: vfabs_nxv16f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT: vfabs.v v8, v8
 ; CHECK-NEXT: ret
  %r = call <vscale x 16 x half> @llvm.fabs.nxv16f16(<vscale x 16 x half> %v)
  ret <vscale x 16 x half> %r
@@ -70,7 +70,7 @@
 ; CHECK-LABEL: vfabs_nxv32f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT: vfabs.v v8, v8
 ; CHECK-NEXT: ret
  %r = call <vscale x 32 x half> @llvm.fabs.nxv32f16(<vscale x 32 x half> %v)
  ret <vscale x 32 x half> %r
@@ -82,7 +82,7 @@
 ; CHECK-LABEL: vfabs_nxv1f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT: vfabs.v v8, v8
 ; CHECK-NEXT: ret
  %r = call <vscale x 1 x float> @llvm.fabs.nxv1f32(<vscale x 1 x float> %v)
  ret <vscale x 1 x float> %r
@@ -94,7 +94,7 @@
 ; CHECK-LABEL: vfabs_nxv2f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT: vfabs.v v8, v8
 ; CHECK-NEXT: ret
  %r = call <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float> %v)
  ret <vscale x 2 x float> %r
@@ -106,7 +106,7 @@
 ; CHECK-LABEL: vfabs_nxv4f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT: vfabs.v v8, v8
 ; CHECK-NEXT: ret
  %r = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> %v)
  ret <vscale x 4 x float> %r
@@ -118,7 +118,7 @@
 ; CHECK-LABEL: vfabs_nxv8f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT: vfabs.v v8, v8
 ; CHECK-NEXT: ret
  %r = call <vscale x 8 x float> @llvm.fabs.nxv8f32(<vscale x 8 x float> %v)
  ret <vscale x 8 x float> %r
@@ -130,7 +130,7 @@
 ; CHECK-LABEL: vfabs_nxv16f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT: vfabs.v v8, v8
 ; CHECK-NEXT: ret
  %r = call <vscale x 16 x float> @llvm.fabs.nxv16f32(<vscale x 16 x float> %v)
  ret <vscale x 16 x float> %r
@@ -142,7 +142,7 @@
 ; CHECK-LABEL: vfabs_nxv1f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT: vfabs.v v8, v8
 ; CHECK-NEXT: ret
  %r = call <vscale x 1 x double> @llvm.fabs.nxv1f64(<vscale x 1 x double> %v)
  ret <vscale x 1 x double> %r
@@ -154,7 +154,7 @@
 ; CHECK-LABEL: vfabs_nxv2f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT: vfabs.v v8, v8
 ; CHECK-NEXT: ret
  %r = call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> %v)
  ret <vscale x 2 x double> %r
@@ -166,7 +166,7 @@
 ; CHECK-LABEL: vfabs_nxv4f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT: vfabs.v v8, v8
 ; CHECK-NEXT: ret
  %r = call <vscale x 4 x double> @llvm.fabs.nxv4f64(<vscale x 4 x double> %v)
  ret <vscale x 4 x double> %r
@@ -178,7 +178,7 @@
 ; CHECK-LABEL: vfabs_nxv8f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT: vfabs.v v8, v8
 ; CHECK-NEXT: ret
  %r = call <vscale x 8 x double> @llvm.fabs.nxv8f64(<vscale x 8 x double> %v)
  ret <vscale x 8 x double> %r
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll
@@ -8,7 +8,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv1f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %vb = fneg <vscale x 1 x half> %va
  ret <vscale x 1 x half> %vb
@@ -18,7 +18,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv2f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %vb = fneg <vscale x 2 x half> %va
  ret <vscale x 2 x half> %vb
@@ -28,7 +28,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv4f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %vb = fneg <vscale x 4 x half> %va
  ret <vscale x 4 x half> %vb
@@ -38,7 +38,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv8f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %vb = fneg <vscale x 8 x half> %va
  ret <vscale x 8 x half> %vb
@@ -48,7 +48,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv16f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %vb = fneg <vscale x 16 x half> %va
  ret <vscale x 16 x half> %vb
@@ -58,7 +58,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv32f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %vb = fneg <vscale x 32 x half> %va
  ret <vscale x 32 x half> %vb
@@ -68,7 +68,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv1f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %vb = fneg <vscale x 1 x float> %va
  ret <vscale x 1 x float> %vb
@@ -78,7 +78,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv2f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %vb = fneg <vscale x 2 x float> %va
  ret <vscale x 2 x float> %vb
@@ -88,7 +88,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv4f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %vb = fneg <vscale x 4 x float> %va
  ret <vscale x 4 x float> %vb
@@ -98,7 +98,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv8f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %vb = fneg <vscale x 8 x float> %va
  ret <vscale x 8 x float> %vb
@@ -108,7 +108,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv16f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %vb = fneg <vscale x 16 x float> %va
  ret <vscale x 16 x float> %vb
@@ -118,7 +118,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv1f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %vb = fneg <vscale x 1 x double> %va
  ret <vscale x 1 x double> %vb
@@ -128,7 +128,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv2f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %vb = fneg <vscale x 2 x double> %va
  ret <vscale x 2 x double> %vb
@@ -138,7 +138,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv4f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %vb = fneg <vscale x 4 x double> %va
  ret <vscale x 4 x double> %vb
@@ -148,7 +148,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv8f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %vb = fneg <vscale x 8 x double> %va
  ret <vscale x 8 x double> %vb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
@@ -20,7 +20,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv1f16_unmasked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
@@ -44,7 +44,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv2f16_unmasked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
@@ -68,7 +68,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv4f16_unmasked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
@@ -92,7 +92,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv8f16_unmasked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
@@ -116,7 +116,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv16f16_unmasked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
@@ -140,7 +140,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv32f16_unmasked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
@@ -164,7 +164,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv1f32_unmasked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
@@ -188,7 +188,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv2f32_unmasked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
@@ -212,7 +212,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv4f32_unmasked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
@@ -236,7 +236,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv8f32_unmasked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
@@ -260,7 +260,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv16f32_unmasked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
@@ -284,7 +284,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv1f64_unmasked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
@@ -308,7 +308,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv2f64_unmasked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
@@ -332,7 +332,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv4f64_unmasked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
@@ -356,7 +356,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv7f64_unmasked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
@@ -380,7 +380,7 @@
 ; CHECK-LABEL: vfneg_vv_nxv8f64_unmasked:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: ret
  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
@@ -431,13 +431,13 @@
 ; CHECK-NEXT: li a3, 0
 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: sub a1, a0, a1
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
 ; CHECK-NEXT: bltu a0, a1, .LBB33_4
 ; CHECK-NEXT: # %bb.3:
 ; CHECK-NEXT: mv a3, a1
 ; CHECK-NEXT: .LBB33_4:
 ; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v16, v16, v16
+; CHECK-NEXT: vfneg.v v16, v16
 ; CHECK-NEXT: ret
  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
@@ -636,7 +636,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v9, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v9
 ; CHECK-NEXT: ret
  %evec = fptosi <vscale x 1 x half> %va to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %evec
@@ -648,7 +648,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v9, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v9
 ; CHECK-NEXT: ret
  %evec = fptoui <vscale x 1 x half> %va to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %evec
@@ -748,7 +748,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v9, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v9
 ; CHECK-NEXT: ret
  %evec = fptosi <vscale x 2 x half> %va to <vscale x 2 x i8>
  ret <vscale x 2 x i8> %evec
@@ -760,7 +760,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v9, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v9
 ; CHECK-NEXT: ret
  %evec = fptoui <vscale x 2 x half> %va to <vscale x 2 x i8>
  ret <vscale x 2 x i8> %evec
@@ -860,7 +860,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v10, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v10
 ; CHECK-NEXT: ret
  %evec = fptosi <vscale x 4 x half> %va to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %evec
@@ -872,7 +872,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v10, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v10
 ; CHECK-NEXT: ret
  %evec = fptoui <vscale x 4 x half> %va to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %evec
@@ -972,7 +972,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v12, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v12
 ; CHECK-NEXT: ret
  %evec = fptosi <vscale x 8 x half> %va to <vscale x 8 x i8>
  ret <vscale x 8 x i8> %evec
@@ -984,7 +984,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v12, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v12
 ; CHECK-NEXT: ret
  %evec = fptoui <vscale x 8 x half> %va to <vscale x 8 x i8>
  ret <vscale x 8 x i8> %evec
@@ -1084,7 +1084,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v16, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v16
 ; CHECK-NEXT: ret
  %evec = fptosi <vscale x 16 x half> %va to <vscale x 16 x i8>
  ret <vscale x 16 x i8> %evec
@@ -1096,7 +1096,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v16, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v16
 ; CHECK-NEXT: ret
  %evec = fptoui <vscale x 16 x half> %va to <vscale x 16 x i8>
  ret <vscale x 16 x i8> %evec
@@ -1174,9 +1174,9 @@
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v9, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v9
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: ret
  %evec = fptosi <vscale x 1 x float> %va to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %evec
@@ -1188,9 +1188,9 @@
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v9, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v9
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: ret
  %evec = fptoui <vscale x 1 x float> %va to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %evec
@@ -1202,7 +1202,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v9, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v9
 ; CHECK-NEXT: ret
  %evec = fptosi <vscale x 1 x float> %va to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %evec
@@ -1214,7 +1214,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v9, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v9
 ; CHECK-NEXT: ret
  %evec = fptoui <vscale x 1 x float> %va to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %evec
@@ -1292,9 +1292,9 @@
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v10, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v10
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: ret
  %evec = fptosi <vscale x 2 x float> %va to <vscale x 2 x i8>
  ret <vscale x 2 x i8> %evec
@@ -1306,9 +1306,9 @@
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v10, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v10
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: ret
  %evec = fptoui <vscale x 2 x float> %va to <vscale x 2 x i8>
  ret <vscale x 2 x i8> %evec
@@ -1320,7 +1320,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v10, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v10
 ; CHECK-NEXT: ret
  %evec = fptosi <vscale x 2 x float> %va to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %evec
@@ -1332,7 +1332,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v10, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v10
 ; CHECK-NEXT: ret
  %evec = fptoui <vscale x 2 x float> %va to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %evec
@@ -1410,9 +1410,9 @@
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v12, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v12
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: ret
  %evec = fptosi <vscale x 4 x float> %va to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %evec
@@ -1424,9 +1424,9 @@
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v12, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v12
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: ret
  %evec = fptoui <vscale x 4 x float> %va to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %evec
@@ -1438,7 +1438,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v12, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v12
 ; CHECK-NEXT: ret
  %evec = fptosi <vscale x 4 x float> %va to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %evec
@@ -1450,7 +1450,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v12, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v12
 ; CHECK-NEXT: ret
  %evec = fptoui <vscale x 4 x float> %va to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %evec
@@ -1528,9 +1528,9 @@
 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v10, v16, zero
+; CHECK-NEXT: vncvt.x.x.w v10, v16
 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v10, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v10
 ; CHECK-NEXT: ret
  %evec = fptosi <vscale x 8 x float> %va to <vscale x 8 x i8>
  ret <vscale x 8 x i8> %evec
@@ -1542,9 +1542,9 @@
 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v10, v16, zero
+; CHECK-NEXT: vncvt.x.x.w v10, v16
 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v10, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v10
 ; CHECK-NEXT: ret
  %evec = fptoui <vscale x 8 x float> %va to <vscale x 8 x i8>
  ret <vscale x 8 x i8> %evec
@@ -1556,7 +1556,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v16, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v16
 ; CHECK-NEXT: ret
  %evec = fptosi <vscale x 8 x float> %va to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %evec
@@ -1568,7 +1568,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v16, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v16
 ; CHECK-NEXT: ret
  %evec = fptoui <vscale x 8 x float> %va to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %evec
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
@@ -1056,7 +1056,7 @@
 ; RV32-NEXT: vsext.vf8 v16, v8
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -1082,7 +1082,7 @@
 ; RV32-NEXT: vzext.vf8 v16, v8
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -1131,7 +1131,7 @@
 ; RV32-NEXT: vsext.vf4 v16, v8
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -1157,7 +1157,7 @@
 ; RV32-NEXT: vzext.vf4 v16, v8
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -1205,7 +1205,7 @@
 ; RV32-NEXT: vsext.vf2 v16, v8
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -1231,7 +1231,7 @@
 ; RV32-NEXT: vzext.vf2 v16, v8
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -1256,7 +1256,7 @@
 ; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsll.vi v8, v8, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -1851,7 +1851,7 @@
 ; RV32-NEXT: vsext.vf8 v16, v8
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -1877,7 +1877,7 @@
 ; RV32-NEXT: vzext.vf8 v16, v8
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -1926,7 +1926,7 @@
 ; RV32-NEXT: vsext.vf4 v16, v8
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -1952,7 +1952,7 @@
 ; RV32-NEXT: vzext.vf4 v16, v8
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -2000,7 +2000,7 @@
 ; RV32-NEXT: vsext.vf2 v16, v8
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -2026,7 +2026,7 @@
 ; RV32-NEXT: vzext.vf2 v16, v8
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -2051,7 +2051,7 @@
 ; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsll.vi v8, v8, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -2117,7 +2117,7 @@
 ; RV32-NEXT: vsext.vf8 v16, v8
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -2143,7 +2143,7 @@
 ; RV32-NEXT: vzext.vf8 v16, v8
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -2192,7 +2192,7 @@
 ; RV32-NEXT: vsext.vf4 v16, v8
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -2218,7 +2218,7 @@
 ; RV32-NEXT: vzext.vf4 v16, v8
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -2266,7 +2266,7 @@
 ; RV32-NEXT: vsext.vf2 v16, v8
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -2292,7 +2292,7 @@
 ; RV32-NEXT: vzext.vf2 v16, v8
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -2317,7 +2317,7 @@
 ; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsll.vi v8, v8, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: ret
@@ -2466,7 +2466,7 @@
 ; RV32-NEXT: vsext.vf4 v24, v10
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: srli a3, a2, 3
@@ -2480,7 +2480,7 @@
 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, a4, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -2536,7 +2536,7 @@
 ; RV32-NEXT: vzext.vf4 v24, v10
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT: srli a3, a2, 3
@@ -2550,7 +2550,7 @@
 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, a4, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
@@ -44,7 +44,7 @@
 ; RV32-LABEL: vpscatter_nxv2i16_truncstore_nxv2i8:
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
 ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT: ret
@@ -52,7 +52,7 @@
 ; RV64-LABEL: vpscatter_nxv2i16_truncstore_nxv2i8:
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
 ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT: ret
@@ -65,9 +65,9 @@
 ; RV32-LABEL: vpscatter_nxv2i32_truncstore_nxv2i8:
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
 ; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
 ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT: ret
@@ -75,9 +75,9 @@
 ; RV64-LABEL: vpscatter_nxv2i32_truncstore_nxv2i8:
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
 ; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
 ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT: ret
@@ -90,11 +90,11 @@
 ; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i8:
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu
-; RV32-NEXT: vnsrl.wx v11, v8, zero
+; RV32-NEXT: vncvt.x.x.w v11, v8
 ; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v11, zero
+; RV32-NEXT: vncvt.x.x.w v8, v11
 ; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
 ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
 ; RV32-NEXT: ret
@@ -102,11 +102,11 @@
 ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i8:
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu
-; RV64-NEXT: vnsrl.wx v12, v8, zero
+; RV64-NEXT: vncvt.x.x.w v12, v8
 ; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v12, zero
+; RV64-NEXT: vncvt.x.x.w v8, v12
 ; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
 ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT: ret
@@ -230,7 +230,7 @@
 ; RV32-LABEL: vpscatter_nxv2i32_truncstore_nxv2i16:
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
 ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT: ret
@@ -238,7 +238,7 @@
 ; RV64-LABEL: vpscatter_nxv2i32_truncstore_nxv2i16:
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
 ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT: ret
@@ -251,9 +251,9 @@
 ; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i16:
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu
-; RV32-NEXT: vnsrl.wx v11, v8, zero
+; RV32-NEXT: vncvt.x.x.w v11, v8
 ; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v11, zero
+; RV32-NEXT: vncvt.x.x.w v8, v11
 ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
 ; RV32-NEXT: ret
@@ -261,9 +261,9 @@
 ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i16:
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu
-; RV64-NEXT: vnsrl.wx v12, v8, zero
+; RV64-NEXT: vncvt.x.x.w v12, v8
 ; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v12, zero
+; RV64-NEXT: vncvt.x.x.w v8, v12
 ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT: ret
@@ -460,7 +460,7 @@
 ; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i32:
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu
-; RV32-NEXT: vnsrl.wx v11, v8, zero
+; RV32-NEXT: vncvt.x.x.w v11, v8
 ; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu
 ; RV32-NEXT: vsoxei32.v v11, (zero), v10, v0.t
 ; RV32-NEXT: ret
@@ -468,7 +468,7 @@
 ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i32:
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu
-; RV64-NEXT: vnsrl.wx v12, v8, zero
+; RV64-NEXT: vncvt.x.x.w v12, v8
 ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu
 ; RV64-NEXT: vsoxei64.v v12, (zero), v10, v0.t
 ; RV64-NEXT: ret
@@ -815,7 +815,7 @@
 ; RV32-NEXT: vsext.vf8 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -841,7 +841,7 @@
 ; RV32-NEXT: vzext.vf8 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -890,7 +890,7 @@
 ; RV32-NEXT: vsext.vf4 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -916,7 +916,7 @@
 ; RV32-NEXT: vzext.vf4 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -964,7 +964,7 @@
 ; RV32-NEXT: vsext.vf2 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -990,7 +990,7 @@
 ; RV32-NEXT: vzext.vf2 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1015,7 +1015,7 @@
 ; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsll.vi v16, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1590,7 +1590,7 @@
 ; RV32-NEXT: vsext.vf8 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1616,7 +1616,7 @@
 ; RV32-NEXT: vzext.vf8 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1665,7 +1665,7 @@
 ; RV32-NEXT: vsext.vf4 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1691,7 +1691,7 @@
 ; RV32-NEXT: vzext.vf4 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1739,7 +1739,7 @@
 ; RV32-NEXT: vsext.vf2 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1765,7 +1765,7 @@
 ; RV32-NEXT: vzext.vf2 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1790,7 +1790,7 @@
 ; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsll.vi v16, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1855,7 +1855,7 @@
 ; RV32-NEXT: vsext.vf8 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1881,7 +1881,7 @@
 ; RV32-NEXT: vzext.vf8 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1930,7 +1930,7 @@
 ; RV32-NEXT: vsext.vf4 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -1956,7 +1956,7 @@
 ; RV32-NEXT: vzext.vf4 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -2004,7 +2004,7 @@
 ; RV32-NEXT: vsext.vf2 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -2030,7 +2030,7 @@
 ; RV32-NEXT: vzext.vf2 v24, v16
 ; RV32-NEXT: vsll.vi v16, v24, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -2055,7 +2055,7 @@
 ; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsll.vi v16, v16, 3
 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v16, zero
+; RV32-NEXT: vncvt.x.x.w v24, v16
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT: ret
@@ -2232,7 +2232,7 @@
 ; RV32-NEXT: vsext.vf4 v16, v26
 ; RV32-NEXT: vsll.vi v8, v8, 3
 ; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: addi a3, sp, 16
 ; RV32-NEXT: vl8re8.v v8, (a3) # Unknown-size Folded Reload
@@ -2248,7 +2248,7 @@
 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a4, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: csrr a1, vlenb
 ; RV32-NEXT: slli a1, a1, 3
@@ -2348,7 +2348,7 @@
 ; RV32-NEXT: vzext.vf4 v16, v26
 ; RV32-NEXT: vsll.vi v8, v8, 3
 ; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: addi a3, sp, 16
 ; RV32-NEXT: vl8re8.v v8, (a3) # Unknown-size Folded Reload
@@ -2364,7 +2364,7 @@
 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsll.vi v8, v16, 3
 ; RV32-NEXT: vsetvli zero, a4, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT: csrr a1, vlenb
 ; RV32-NEXT: slli a1, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -507,7 +507,7 @@
 ; CHECK-NEXT: # %bb.1: # %if
 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vle16.v v10, (a1)
-; CHECK-NEXT: vwadd.vx v8, v10, zero
+; CHECK-NEXT: vwcvt.x.x.v v8, v10
 ; CHECK-NEXT: .LBB9_2: # %if.end
 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vadd.vv v8, v9, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll
@@ -6,7 +6,7 @@
 ; CHECK-LABEL: vtrunc_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: ret
  %tvec = trunc <vscale x 1 x i16> %va to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %tvec
@@ -16,7 +16,7 @@
 ; CHECK-LABEL: vtrunc_nxv2i16_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: ret
  %tvec = trunc <vscale x 2 x i16> %va to <vscale x 2 x i8>
  ret <vscale x 2 x i8> %tvec
@@ -26,7 +26,7 @@
 ; CHECK-LABEL: vtrunc_nxv4i16_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: ret
  %tvec = trunc <vscale x 4 x i16> %va to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %tvec
@@ -36,7 +36,7 @@
 ; CHECK-LABEL: vtrunc_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v10, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v10, v8
 ; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
  %tvec = trunc <vscale x 8 x i16> %va to <vscale x 8 x i8>
@@ -47,7 +47,7 @@
 ; CHECK-LABEL: vtrunc_nxv16i16_nxv16i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v12, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v12, v8
 ; CHECK-NEXT: vmv.v.v v8, v12
 ; CHECK-NEXT: ret
  %tvec = trunc <vscale x 16 x i16> %va to <vscale x 16 x i8>
@@ -58,9 +58,9 @@
 ; CHECK-LABEL: vtrunc_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: ret
  %tvec = trunc <vscale x 1 x i32> %va to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %tvec
@@ -70,7 +70,7 @@
 ; CHECK-LABEL: vtrunc_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: ret
  %tvec = trunc <vscale x 1 x i32> %va to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %tvec
@@ -80,9 +80,9 @@
 ; CHECK-LABEL: vtrunc_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: ret
  %tvec = trunc <vscale x 2 x i32> %va to <vscale x 2 x i8>
  ret <vscale x 2 x i8> %tvec
@@ -92,7 +92,7 @@
 ; CHECK-LABEL: vtrunc_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: ret
  %tvec = trunc <vscale x 2 x i32> %va to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %tvec
@@ -102,9 +102,9 @@
 ; CHECK-LABEL: vtrunc_nxv4i32_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v10, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v10, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v10, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v10
 ; CHECK-NEXT: ret
  %tvec = trunc <vscale x 4 x i32> %va to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %tvec
@@ -114,7 +114,7 @@
 ; CHECK-LABEL: vtrunc_nxv4i32_nxv4i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v10, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v10, v8
 ; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
  %tvec = trunc <vscale x 4 x i32> %va to <vscale x 4 x i16>
@@ -125,9 +125,9 @@
 ; CHECK-LABEL: vtrunc_nxv8i32_nxv8i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v12, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v12, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v12, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v12
 ; CHECK-NEXT: ret
  %tvec = trunc <vscale x 8 x i32> %va to <vscale x 8 x i8>
  ret <vscale x 8 x i8> %tvec
@@ -137,7 +137,7 @@
 ; CHECK-LABEL: vtrunc_nxv8i32_nxv8i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v12, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v12, v8
 ; CHECK-NEXT: vmv.v.v v8, v12
 ; CHECK-NEXT: ret
  %tvec = trunc <vscale x 8 x i32> %va to <vscale x 8 x i16>
@@ -148,9 +148,9 @@
 ; CHECK-LABEL: vtrunc_nxv16i32_nxv16i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT: vnsrl.wx v16, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v16, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v16, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v16
 ; CHECK-NEXT: ret
  %tvec = trunc <vscale x 16 x i32> %va to <vscale x 16 x i8>
  ret <vscale x 16 x i8> %tvec
@@ -160,7 +160,7 @@
 ; CHECK-LABEL: vtrunc_nxv16i32_nxv16i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT: vnsrl.wx v16, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v16, v8
 ; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
  %tvec = trunc <vscale x 16 x i32> %va to <vscale x 16 x i16>
@@ -171,11 +171,11 @@
 ; CHECK-LABEL: vtrunc_nxv1i64_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: ret
  %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %tvec
@@ -185,9 +185,9 @@
 ; CHECK-LABEL: vtrunc_nxv1i64_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: ret
   %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i16>
   ret <vscale x 1 x i16> %tvec
@@ -197,7 +197,7 @@
 ; CHECK-LABEL: vtrunc_nxv1i64_nxv1i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: ret
   %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %tvec
@@ -207,11 +207,11 @@
 ; CHECK-LABEL: vtrunc_nxv2i64_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v10, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v10, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v10, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v10
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: ret
   %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i8>
   ret <vscale x 2 x i8> %tvec
@@ -221,9 +221,9 @@
 ; CHECK-LABEL: vtrunc_nxv2i64_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v10, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v10, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v10, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v10
 ; CHECK-NEXT: ret
   %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i16>
   ret <vscale x 2 x i16> %tvec
@@ -233,7 +233,7 @@
 ; CHECK-LABEL: vtrunc_nxv2i64_nxv2i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v10, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v10, v8
 ; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
   %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i32>
@@ -244,11 +244,11 @@
 ; CHECK-LABEL: vtrunc_nxv4i64_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v12, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v12, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v12, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v12
 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
 ; CHECK-NEXT: ret
   %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %tvec
@@ -258,9 +258,9 @@
 ; CHECK-LABEL: vtrunc_nxv4i64_nxv4i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v12, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v12, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v12, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v12
 ; CHECK-NEXT: ret
   %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i16>
   ret <vscale x 4 x i16> %tvec
@@ -270,7 +270,7 @@
 ; CHECK-LABEL: vtrunc_nxv4i64_nxv4i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v12, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v12, v8
 ; CHECK-NEXT: vmv.v.v v8, v12
 ; CHECK-NEXT: ret
   %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i32>
@@ -281,11 +281,11 @@
 ; CHECK-LABEL: vtrunc_nxv8i64_nxv8i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT: vnsrl.wx v16, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v16, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v10, v16, zero
+; CHECK-NEXT: vncvt.x.x.w v10, v16
 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v10, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v10
 ; CHECK-NEXT: ret
   %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %tvec
@@ -295,9 +295,9 @@
 ; CHECK-LABEL: vtrunc_nxv8i64_nxv8i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT: vnsrl.wx v16, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v16, v8
 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v16, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v16
 ; CHECK-NEXT: ret
   %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %tvec
@@ -307,7 +307,7 @@
 ; CHECK-LABEL: vtrunc_nxv8i64_nxv8i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT: vnsrl.wx v16, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v16, v8
 ; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
   %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i32>
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll
@@ -28,7 +28,7 @@
 ; CHECK-LABEL: vxor_vi_nxv1i8_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
   %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -87,7 +87,7 @@
 ; CHECK-LABEL: vxor_vi_nxv2i8_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
   %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -146,7 +146,7 @@
 ; CHECK-LABEL: vxor_vi_nxv4i8_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
   %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -205,7 +205,7 @@
 ; CHECK-LABEL: vxor_vi_nxv8i8_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
   %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -264,7 +264,7 @@
 ; CHECK-LABEL: vxor_vi_nxv16i8_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
   %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
@@ -323,7 +323,7 @@
 ; CHECK-LABEL: vxor_vi_nxv32i8_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
   %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
@@ -382,7 +382,7 @@
 ; CHECK-LABEL: vxor_vi_nxv64i8_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
   %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
@@ -441,7 +441,7 @@
 ; CHECK-LABEL: vxor_vi_nxv1i16_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
   %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -500,7 +500,7 @@
 ; CHECK-LABEL: vxor_vi_nxv2i16_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
   %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -559,7 +559,7 @@
 ; CHECK-LABEL: vxor_vi_nxv4i16_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
   %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -618,7 +618,7 @@
 ; CHECK-LABEL: vxor_vi_nxv8i16_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
   %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -677,7 +677,7 @@
 ; CHECK-LABEL: vxor_vi_nxv16i16_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
   %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
@@ -736,7 +736,7 @@
 ; CHECK-LABEL: vxor_vi_nxv32i16_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
   %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
@@ -795,7 +795,7 @@
 ; CHECK-LABEL: vxor_vi_nxv1i32_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
   %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
@@ -854,7 +854,7 @@
 ; CHECK-LABEL: vxor_vi_nxv2i32_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
   %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
@@ -913,7 +913,7 @@
 ; CHECK-LABEL: vxor_vi_nxv4i32_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
   %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
@@ -972,7 +972,7 @@
 ; CHECK-LABEL: vxor_vi_nxv8i32_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
   %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
@@ -1031,7 +1031,7 @@
 ; CHECK-LABEL: vxor_vi_nxv16i32_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
   %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
@@ -1103,7 +1103,7 @@
 ; CHECK-LABEL: vxor_vi_nxv1i64_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
   %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
@@ -1175,7 +1175,7 @@
 ; CHECK-LABEL: vxor_vi_nxv2i64_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
   %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
@@ -1247,7 +1247,7 @@
 ; CHECK-LABEL: vxor_vi_nxv4i64_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
   %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
@@ -1319,7 +1319,7 @@
 ; CHECK-LABEL: vxor_vi_nxv8i64_0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
   %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
@@ -110,7 +110,7 @@
 ; CHECK-LABEL: vxor_vi_nxv1i8_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -212,7 +212,7 @@
 ; CHECK-LABEL: vxor_vi_nxv2i8_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -314,7 +314,7 @@
 ; CHECK-LABEL: vxor_vi_nxv4i8_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -416,7 +416,7 @@
 ; CHECK-LABEL: vxor_vi_nxv8i8_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -518,7 +518,7 @@
 ; CHECK-LABEL: vxor_vi_nxv15i8_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 15 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <vscale x 15 x i8> %elt.head, <vscale x 15 x i8> poison, <vscale x 15 x i32> zeroinitializer
@@ -620,7 +620,7 @@
 ; CHECK-LABEL: vxor_vi_nxv16i8_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
@@ -722,7 +722,7 @@
 ; CHECK-LABEL: vxor_vi_nxv32i8_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
@@ -824,7 +824,7 @@
 ; CHECK-LABEL: vxor_vi_nxv64i8_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
@@ -938,7 +938,7 @@
 ; CHECK-LABEL: vxor_vi_nxv1i16_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -1040,7 +1040,7 @@
 ; CHECK-LABEL: vxor_vi_nxv2i16_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -1142,7 +1142,7 @@
 ; CHECK-LABEL: vxor_vi_nxv4i16_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -1244,7 +1244,7 @@
 ; CHECK-LABEL: vxor_vi_nxv8i16_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -1346,7 +1346,7 @@
 ; CHECK-LABEL: vxor_vi_nxv16i16_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
@@ -1448,7 +1448,7 @@
 ; CHECK-LABEL: vxor_vi_nxv32i16_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
@@ -1550,7 +1550,7 @@
 ; CHECK-LABEL: vxor_vi_nxv1i32_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
@@ -1652,7 +1652,7 @@
 ; CHECK-LABEL: vxor_vi_nxv2i32_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
@@ -1754,7 +1754,7 @@
 ; CHECK-LABEL: vxor_vi_nxv4i32_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
@@ -1856,7 +1856,7 @@
 ; CHECK-LABEL: vxor_vi_nxv8i32_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
@@ -1958,7 +1958,7 @@
 ; CHECK-LABEL: vxor_vi_nxv16i32_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
@@ -2088,7 +2088,7 @@
 ; CHECK-LABEL: vxor_vi_nxv1i64_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
@@ -2218,7 +2218,7 @@
 ; CHECK-LABEL: vxor_vi_nxv2i64_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
@@ -2348,7 +2348,7 @@
 ; CHECK-LABEL: vxor_vi_nxv4i64_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
@@ -2478,7 +2478,7 @@
 ; CHECK-LABEL: vxor_vi_nxv8i64_unmasked_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
diff --git a/llvm/test/MC/RISCV/rvv/add.s b/llvm/test/MC/RISCV/rvv/add.s
--- a/llvm/test/MC/RISCV/rvv/add.s
+++ b/llvm/test/MC/RISCV/rvv/add.s
@@ -345,7 +345,7 @@
 # CHECK-UNKNOWN: 57 64 40 c4
 
 vwcvt.x.x.v v8, v4
-# CHECK-INST: vwadd.vx v8, v4, zero
+# CHECK-INST: vwcvt.x.x.v v8, v4
 # CHECK-ENCODING: [0x57,0x64,0x40,0xc6]
 # CHECK-ERROR: instruction requires the following: 'V' (Vector Extension for Application Processors), 'Zve32x' or 'Zve64x' (Vector Extensions for Embedded Processors)
 # CHECK-UNKNOWN: 57 64 40 c6
@@ -357,7 +357,7 @@
 # CHECK-UNKNOWN: 57 64 40 c0
 
 vwcvtu.x.x.v v8, v4
-# CHECK-INST: vwaddu.vx v8, v4, zero
+# CHECK-INST: vwcvtu.x.x.v v8, v4
 # CHECK-ENCODING: [0x57,0x64,0x40,0xc2]
 # CHECK-ERROR: instruction requires the following: 'V' (Vector Extension for Application Processors), 'Zve32x' or 'Zve64x' (Vector Extensions for Embedded Processors)
 # CHECK-UNKNOWN: 57 64 40 c2
diff --git a/llvm/test/MC/RISCV/rvv/xor.s b/llvm/test/MC/RISCV/rvv/xor.s
--- a/llvm/test/MC/RISCV/rvv/xor.s
+++ b/llvm/test/MC/RISCV/rvv/xor.s
@@ -51,7 +51,7 @@
 # CHECK-UNKNOWN: 57 b4 4f 2c
 
 vnot.v v8, v4
-# CHECK-INST: vxor.vi v8, v4, -1
+# CHECK-INST: vnot.v v8, v4
 # CHECK-ENCODING: [0x57,0xb4,0x4f,0x2e]
 # CHECK-ERROR: instruction requires the following: 'V' (Vector Extension for Application Processors), 'Zve32x' or 'Zve64x' (Vector Extensions for Embedded Processors)
 # CHECK-UNKNOWN: 57 b4 4f 2e
diff --git a/llvm/utils/TableGen/AsmWriterEmitter.cpp b/llvm/utils/TableGen/AsmWriterEmitter.cpp
--- a/llvm/utils/TableGen/AsmWriterEmitter.cpp
+++ b/llvm/utils/TableGen/AsmWriterEmitter.cpp
@@ -868,8 +868,6 @@
     IAPrinter IAP(CGA.Result->getAsString(), FlatAliasAsmString,
                   NumMIOps);
 
-    bool CantHandle = false;
-
     unsigned MIOpNum = 0;
     for (unsigned i = 0, e = LastOpNo; i != e; ++i) {
       // Skip over tied operands as they're not part of an alias declaration.
@@ -969,10 +967,9 @@
         break;
       }
       case CodeGenInstAlias::ResultOperand::K_Reg:
-        // If this is zero_reg, something's playing tricks we're not
-        // equipped to handle.
        if (!CGA.ResultOperands[i].getRegister()) {
-          CantHandle = true;
+          IAP.addCond(std::string(formatv(
+              "AliasPatternCond::K_Reg, {0}::NoRegister", Namespace)));
          break;
        }
 
@@ -985,8 +982,6 @@
       MIOpNum += RO.getMINumOperands();
     }
 
-    if (CantHandle) continue;
-
     std::vector<Record *> ReqFeatures;
     if (PassSubtarget) {
      // We only consider ReqFeatures predicates if PassSubtarget