diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.td b/llvm/lib/Target/ARM/ARMInstrInfo.td
--- a/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -6388,7 +6388,7 @@
            (RSBri GPR:$Rd, GPR:$Rm, 0, pred:$p, cc_out:$s)>;
 
 // Pre-v6, 'mov r0, r0' was used as a NOP encoding.
-def : InstAlias<"nop${p}", (MOVr R0, R0, pred:$p, zero_reg)>,
+def : InstAlias<"nop${p}", (MOVr R0, R0, pred:$p, zero_reg), 0>,
       Requires<[IsARM, NoV6]>;
 
 // MUL/UMLAL/SMLAL/UMULL/SMULL are available on all arches, but
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -893,6 +893,7 @@
 defm VRSUB_V : VALU_IV_X_I<"vrsub", 0b000011>;
 
 def : InstAlias<"vneg.v $vd, $vs$vm", (VRSUB_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
+def : InstAlias<"vneg.v $vd, $vs", (VRSUB_VX VR:$vd, VR:$vs, X0, zero_reg)>;
 
 // Vector Widening Integer Add/Subtract
 // Refer to 11.2 Widening Vector Arithmetic Instructions
@@ -920,8 +921,12 @@
 def : InstAlias<"vwcvt.x.x.v $vd, $vs$vm",
                 (VWADD_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
+def : InstAlias<"vwcvt.x.x.v $vd, $vs",
+                (VWADD_VX VR:$vd, VR:$vs, X0, zero_reg)>;
 def : InstAlias<"vwcvtu.x.x.v $vd, $vs$vm",
                 (VWADDU_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
+def : InstAlias<"vwcvtu.x.x.v $vd, $vs",
+                (VWADDU_VX VR:$vd, VR:$vs, X0, zero_reg)>;
 
 // Vector Integer Extension
 defm VZEXT_VF8 : VALU_MV_VS2<"vzext.vf8", 0b010010, 0b00010>;
@@ -950,6 +955,8 @@
 def : InstAlias<"vnot.v $vd, $vs$vm",
                 (VXOR_VI VR:$vd, VR:$vs, -1, VMaskOp:$vm)>;
+def : InstAlias<"vnot.v $vd, $vs",
+                (VXOR_VI VR:$vd, VR:$vs, -1, zero_reg)>;
 
 // Vector Single-Width Bit Shift Instructions
 defm VSLL_V : VSHT_IV_V_X_I<"vsll", 0b100101, uimm5>;
@@ -968,6 +975,8 @@
 def : InstAlias<"vncvt.x.x.w $vd, $vs$vm",
                 (VNSRL_WX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
+def : InstAlias<"vncvt.x.x.w $vd, $vs",
+                (VNSRL_WX VR:$vd, VR:$vs, X0, zero_reg)>;
 
 // Vector Integer Comparison Instructions
 let RVVConstraint = NoConstraint in {
@@ -1186,8 +1195,12 @@
 def : InstAlias<"vfneg.v $vd, $vs$vm",
                 (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
+def : InstAlias<"vfneg.v $vd, $vs",
+                (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>;
 def : InstAlias<"vfabs.v $vd, $vs$vm",
                 (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
+def : InstAlias<"vfabs.v $vd, $vs",
+                (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>;
 
 // Vector Floating-Point Compare Instructions
 let RVVConstraint = NoConstraint in {
diff --git a/llvm/test/CodeGen/RISCV/rvv/aliases.mir b/llvm/test/CodeGen/RISCV/rvv/aliases.mir
--- a/llvm/test/CodeGen/RISCV/rvv/aliases.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/aliases.mir
@@ -1,7 +1,6 @@
 # RUN: llc -mtriple riscv32 -mattr=+v -start-after riscv-expand-pseudo -o - %s | FileCheck %s
 # RUN: llc -mtriple riscv64 -mattr=+v -start-after riscv-expand-pseudo -o - %s | FileCheck %s
-# FIXME: These should all use the 'vnot' alias despite some not having mask operands
 --- |
   define void @vnot_mask_1() {
     ret void
@@ -45,7 +44,7 @@
   liveins: $v25
 
     ; CHECK-LABEL: vnot_no_mask_1:
-    ; CHECK: vxor.vi v25, v25, -1
+    ; CHECK: vnot.v v25, v25
    $v25 = VXOR_VI killed $v25, -1, $noreg, implicit $vtype, implicit $vl
 ...
@@ -56,7 +55,7 @@
   liveins: $v25
 
    ; CHECK-LABEL: vnot_no_mask_2:
-    ; CHECK: vxor.vi v1, v25, -1
+    ; CHECK: vnot.v v1, v25
    $v1 = VXOR_VI killed $v25, -1, $noreg, implicit $vtype, implicit $vl
 ...
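[Note, not part of the patch] The TableGen changes above add unmasked counterparts to aliases that previously only matched the masked forms, and pass an explicit emit priority of 0 to the pre-v6 ARM "nop" alias (in InstAlias terms, still accepted by the parser but no longer preferred when printing). A minimal sketch of the intended assembler round-trip, assuming alias printing is left at its default; the input lines are illustrative:

    # Unmasked canonical forms are now expected to print as their aliases:
    vxor.vi v8, v8, -1     # prints as: vnot.v v8, v8
    vrsub.vx v8, v9, x0    # prints as: vneg.v v8, v9
    vnsrl.wx v8, v9, x0    # prints as: vncvt.x.x.w v8, v9

The test updates below reflect that printer change: expected output now uses the alias spellings for unmasked instructions.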
diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
--- a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
@@ -21,7 +21,7 @@
 ; RV32-NEXT: lui a0, 1048568
 ; RV32-NEXT: vand.vx v8, v8, a0
 ; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: fixedlen:
@@ -32,7 +32,7 @@
 ; RV64-NEXT: slli a0, a0, 3
 ; RV64-NEXT: vand.vx v8, v8, a0
 ; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
 ; RV64-NEXT: ret
 %v41 = insertelement <2 x i32> poison, i32 16, i32 0
 %v42 = shufflevector <2 x i32> %v41, <2 x i32> poison, <2 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
@@ -14,7 +14,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 4
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
 ; RV32I-NEXT: vand.vx v9, v9, a0
@@ -38,7 +38,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 4
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
 ; RV64I-NEXT: vand.vx v9, v9, a0
@@ -61,7 +61,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
 ; RV32D-NEXT: vnsrl.wi v9, v9, 23
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v9, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v9
 ; RV32D-NEXT: li a0, 134
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
 ; RV32D-NEXT: vrsub.vx v8, v9, a0
@@ -76,7 +76,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
 ; RV64D-NEXT: vnsrl.wi v9, v9, 23
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV64D-NEXT: vnsrl.wx v9, v9, zero
+; RV64D-NEXT: vncvt.x.x.w v9, v9
 ; RV64D-NEXT: li a0, 134
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
 ; RV64D-NEXT: vrsub.vx v8, v9, a0
@@ -97,7 +97,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 4
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
 ; RV32I-NEXT: vand.vx v9, v9, a0
@@ -121,7 +121,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 4
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
 ; RV64I-NEXT: vand.vx v9, v9, a0
@@ -144,7 +144,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; RV32D-NEXT: vnsrl.wi v9, v9, 23
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v9, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v9
 ; RV32D-NEXT: li a0, 134
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
 ; RV32D-NEXT: vrsub.vx v8, v9, a0
@@ -159,7 +159,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; RV64D-NEXT: vnsrl.wi v9, v9, 23
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV64D-NEXT: vnsrl.wx v9, v9, zero
+; RV64D-NEXT: vncvt.x.x.w v9, v9
 ; RV64D-NEXT: li a0, 134
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
 ; RV64D-NEXT: vrsub.vx v8, v9, a0
@@ -180,7 +180,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 4
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
 ; RV32I-NEXT: vand.vx v9, v9, a0
@@ -204,7 +204,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 4
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
 ; RV64I-NEXT: vand.vx v9, v9, a0
@@ -227,7 +227,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; RV32D-NEXT: vnsrl.wi v9, v10, 23
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v9, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v9
 ; RV32D-NEXT: li a0, 134
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
 ; RV32D-NEXT: vrsub.vx v8, v9, a0
@@ -242,7 +242,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; RV64D-NEXT: vnsrl.wi v9, v10, 23
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v9, v9, zero
+; RV64D-NEXT: vncvt.x.x.w v9, v9
 ; RV64D-NEXT: li a0, 134
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
 ; RV64D-NEXT: vrsub.vx v8, v9, a0
@@ -263,7 +263,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 4
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
 ; RV32I-NEXT: vand.vx v9, v9, a0
@@ -287,7 +287,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 4
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
 ; RV64I-NEXT: vand.vx v9, v9, a0
@@ -310,7 +310,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; RV32D-NEXT: vnsrl.wi v10, v12, 23
 ; RV32D-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v10, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v10
 ; RV32D-NEXT: li a0, 134
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
 ; RV32D-NEXT: vrsub.vx v8, v9, a0
@@ -325,7 +325,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; RV64D-NEXT: vnsrl.wi v10, v12, 23
 ; RV64D-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v9, v10, zero
+; RV64D-NEXT: vncvt.x.x.w v9, v10
 ; RV64D-NEXT: li a0, 134
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
 ; RV64D-NEXT: vrsub.vx v8, v9, a0
@@ -346,7 +346,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v10
 ; RV32I-NEXT: vsrl.vi v10, v8, 4
 ; RV32I-NEXT: vor.vv v8, v8, v10
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v10, v8, 1
 ; RV32I-NEXT: li a0, 85
 ; RV32I-NEXT: vand.vx v10, v10, a0
@@ -370,7 +370,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v10
 ; RV64I-NEXT: vsrl.vi v10, v8, 4
 ; RV64I-NEXT: vor.vv v8, v8, v10
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v10, v8, 1
 ; RV64I-NEXT: li a0, 85
 ; RV64I-NEXT: vand.vx v10, v10, a0
@@ -393,7 +393,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e16, m4, ta, mu
 ; RV32D-NEXT: vnsrl.wi v12, v16, 23
 ; RV32D-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v10, v12, zero
+; RV32D-NEXT: vncvt.x.x.w v10, v12
 ; RV32D-NEXT: li a0, 134
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
 ; RV32D-NEXT: vrsub.vx v8, v10, a0
@@ -408,7 +408,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e16, m4, ta, mu
 ; RV64D-NEXT: vnsrl.wi v12, v16, 23
 ; RV64D-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v10, v12, zero
+; RV64D-NEXT: vncvt.x.x.w v10, v12
 ; RV64D-NEXT: li a0, 134
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
 ; RV64D-NEXT: vrsub.vx v8, v10, a0
@@ -429,7 +429,7 @@
 ; CHECK-NEXT: vor.vv v8, v8, v12
 ; CHECK-NEXT: vsrl.vi v12, v8, 4
 ; CHECK-NEXT: vor.vv v8, v8, v12
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: vsrl.vi v12, v8, 1
 ; CHECK-NEXT: li a0, 85
 ; CHECK-NEXT: vand.vx v12, v12, a0
@@ -458,7 +458,7 @@
 ; CHECK-NEXT: vor.vv v8, v8, v16
 ; CHECK-NEXT: vsrl.vi v16, v8, 4
 ; CHECK-NEXT: vor.vv v8, v8, v16
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: vsrl.vi v16, v8, 1
 ; CHECK-NEXT: li a0, 85
 ; CHECK-NEXT: vand.vx v16, v16, a0
@@ -489,7 +489,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 8
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 5
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -522,7 +522,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 8
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 5
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -584,7 +584,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 8
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 5
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -617,7 +617,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 8
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 5
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -679,7 +679,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 8
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 5
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -712,7 +712,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 8
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 5
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -774,7 +774,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v10
 ; RV32I-NEXT: vsrl.vi v10, v8, 8
 ; RV32I-NEXT: vor.vv v8, v8, v10
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v10, v8, 1
 ; RV32I-NEXT: lui a0, 5
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -807,7 +807,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v10
 ; RV64I-NEXT: vsrl.vi v10, v8, 8
 ; RV64I-NEXT: vor.vv v8, v8, v10
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v10, v8, 1
 ; RV64I-NEXT: lui a0, 5
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -869,7 +869,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v12
 ; RV32I-NEXT: vsrl.vi v12, v8, 8
 ; RV32I-NEXT: vor.vv v8, v8, v12
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v12, v8, 1
 ; RV32I-NEXT: lui a0, 5
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -902,7 +902,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v12
 ; RV64I-NEXT: vsrl.vi v12, v8, 8
 ; RV64I-NEXT: vor.vv v8, v8, v12
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v12, v8, 1
 ; RV64I-NEXT: lui a0, 5
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -964,7 +964,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v16
 ; RV32-NEXT: vsrl.vi v16, v8, 8
 ; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: vsrl.vi v16, v8, 1
 ; RV32-NEXT: lui a0, 5
 ; RV32-NEXT: addi a0, a0, 1365
@@ -997,7 +997,7 @@
 ; RV64-NEXT: vor.vv v8, v8, v16
 ; RV64-NEXT: vsrl.vi v16, v8, 8
 ; RV64-NEXT: vor.vv v8, v8, v16
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vsrl.vi v16, v8, 1
 ; RV64-NEXT: lui a0, 5
 ; RV64-NEXT: addiw a0, a0, 1365
@@ -1037,7 +1037,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 16
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 349525
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -1073,7 +1073,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 16
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 349525
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -1104,7 +1104,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; RV32D-NEXT: vsrl.vx v9, v9, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v9, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v9
 ; RV32D-NEXT: li a0, 1054
 ; RV32D-NEXT: vrsub.vx v9, v9, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -1120,7 +1120,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; RV64D-NEXT: vsrl.vx v9, v9, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v9, v9, zero
+; RV64D-NEXT: vncvt.x.x.w v9, v9
 ; RV64D-NEXT: li a0, 1054
 ; RV64D-NEXT: vrsub.vx v9, v9, a0
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
@@ -1146,7 +1146,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 16
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 349525
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -1182,7 +1182,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 16
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 349525
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -1213,7 +1213,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; RV32D-NEXT: vsrl.vx v10, v10, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v10, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v10
 ; RV32D-NEXT: li a0, 1054
 ; RV32D-NEXT: vrsub.vx v9, v9, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -1229,7 +1229,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; RV64D-NEXT: vsrl.vx v10, v10, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v9, v10, zero
+; RV64D-NEXT: vncvt.x.x.w v9, v10
 ; RV64D-NEXT: li a0, 1054
 ; RV64D-NEXT: vrsub.vx v9, v9, a0
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
@@ -1255,7 +1255,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v10
 ; RV32I-NEXT: vsrl.vi v10, v8, 16
 ; RV32I-NEXT: vor.vv v8, v8, v10
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v10, v8, 1
 ; RV32I-NEXT: lui a0, 349525
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -1291,7 +1291,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v10
 ; RV64I-NEXT: vsrl.vi v10, v8, 16
 ; RV64I-NEXT: vor.vv v8, v8, v10
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v10, v8, 1
 ; RV64I-NEXT: lui a0, 349525
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -1322,7 +1322,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; RV32D-NEXT: vsrl.vx v12, v12, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v10, v12, zero
+; RV32D-NEXT: vncvt.x.x.w v10, v12
 ; RV32D-NEXT: li a0, 1054
 ; RV32D-NEXT: vrsub.vx v10, v10, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -1338,7 +1338,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; RV64D-NEXT: vsrl.vx v12, v12, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v10, v12, zero
+; RV64D-NEXT: vncvt.x.x.w v10, v12
 ; RV64D-NEXT: li a0, 1054
 ; RV64D-NEXT: vrsub.vx v10, v10, a0
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
@@ -1364,7 +1364,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v12
 ; RV32I-NEXT: vsrl.vi v12, v8, 16
 ; RV32I-NEXT: vor.vv v8, v8, v12
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v12, v8, 1
 ; RV32I-NEXT: lui a0, 349525
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -1400,7 +1400,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v12
 ; RV64I-NEXT: vsrl.vi v12, v8, 16
 ; RV64I-NEXT: vor.vv v8, v8, v12
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v12, v8, 1
 ; RV64I-NEXT: lui a0, 349525
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -1431,7 +1431,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32D-NEXT: vsrl.vx v16, v16, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT: vnsrl.wx v12, v16, zero
+; RV32D-NEXT: vncvt.x.x.w v12, v16
 ; RV32D-NEXT: li a0, 1054
 ; RV32D-NEXT: vrsub.vx v12, v12, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -1447,7 +1447,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV64D-NEXT: vsrl.vx v16, v16, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT: vnsrl.wx v12, v16, zero
+; RV64D-NEXT: vncvt.x.x.w v12, v16
 ; RV64D-NEXT: li a0, 1054
 ; RV64D-NEXT: vrsub.vx v12, v12, a0
 ; RV64D-NEXT: vmseq.vi v0, v8, 0
@@ -1473,7 +1473,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v16
 ; RV32-NEXT: vsrl.vi v16, v8, 16
 ; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: vsrl.vi v16, v8, 1
 ; RV32-NEXT: lui a0, 349525
 ; RV32-NEXT: addi a0, a0, 1365
@@ -1509,7 +1509,7 @@
 ; RV64-NEXT: vor.vv v8, v8, v16
 ; RV64-NEXT: vsrl.vi v16, v8, 16
 ; RV64-NEXT: vor.vv v8, v8, v16
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vsrl.vi v16, v8, 1
 ; RV64-NEXT: lui a0, 349525
 ; RV64-NEXT: addiw a0, a0, 1365
@@ -1573,7 +1573,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v9
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vsrl.vi v11, v8, 1
@@ -1612,7 +1612,7 @@
 ; RV64-NEXT: li a0, 32
 ; RV64-NEXT: vsrl.vx v9, v8, a0
 ; RV64-NEXT: vor.vv v8, v8, v9
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: lui a0, %hi(.LCPI18_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI18_0)(a0)
 ; RV64-NEXT: lui a1, %hi(.LCPI18_1)
@@ -1677,7 +1677,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v10
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vsrl.vi v14, v8, 1
@@ -1716,7 +1716,7 @@
 ; RV64-NEXT: li a0, 32
 ; RV64-NEXT: vsrl.vx v10, v8, a0
 ; RV64-NEXT: vor.vv v8, v8, v10
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: lui a0, %hi(.LCPI19_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI19_0)(a0)
 ; RV64-NEXT: lui a1, %hi(.LCPI19_1)
@@ -1781,7 +1781,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v12
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vsrl.vi v20, v8, 1
@@ -1820,7 +1820,7 @@
 ; RV64-NEXT: li a0, 32
 ; RV64-NEXT: vsrl.vx v12, v8, a0
 ; RV64-NEXT: vor.vv v8, v8, v12
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: lui a0, %hi(.LCPI20_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI20_0)(a0)
 ; RV64-NEXT: lui a1, %hi(.LCPI20_1)
@@ -1885,7 +1885,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v16
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v24, (a0), zero
 ; RV32-NEXT: vsrl.vi v0, v8, 1
@@ -1924,7 +1924,7 @@
 ; RV64-NEXT: li a0, 32
 ; RV64-NEXT: vsrl.vx v16, v8, a0
 ; RV64-NEXT: vor.vv v8, v8, v16
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: lui a0, %hi(.LCPI21_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI21_0)(a0)
 ; RV64-NEXT: lui a1, %hi(.LCPI21_1)
@@ -1962,7 +1962,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 4
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
 ; RV32I-NEXT: vand.vx v9, v9, a0
@@ -1986,7 +1986,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 4
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
 ; RV64I-NEXT: vand.vx v9, v9, a0
@@ -2009,7 +2009,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
 ; RV32D-NEXT: vnsrl.wi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 134
 ; RV32D-NEXT: vrsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -2022,7 +2022,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
 ; RV64D-NEXT: vnsrl.wi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 134
 ; RV64D-NEXT: vrsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -2040,7 +2040,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 4
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
 ; RV32I-NEXT: vand.vx v9, v9, a0
@@ -2064,7 +2064,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 4
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
 ; RV64I-NEXT: vand.vx v9, v9, a0
@@ -2087,7 +2087,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; RV32D-NEXT: vnsrl.wi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 134
 ; RV32D-NEXT: vrsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -2100,7 +2100,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; RV64D-NEXT: vnsrl.wi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 134
 ; RV64D-NEXT: vrsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -2118,7 +2118,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 4
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
 ; RV32I-NEXT: vand.vx v9, v9, a0
@@ -2142,7 +2142,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 4
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
 ; RV64I-NEXT: vand.vx v9, v9, a0
@@ -2165,7 +2165,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; RV32D-NEXT: vnsrl.wi v10, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v10, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v10
 ; RV32D-NEXT: li a0, 134
 ; RV32D-NEXT: vrsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -2178,7 +2178,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; RV64D-NEXT: vnsrl.wi v10, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v10, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v10
 ; RV64D-NEXT: li a0, 134
 ; RV64D-NEXT: vrsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -2196,7 +2196,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 4
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
 ; RV32I-NEXT: vand.vx v9, v9, a0
@@ -2220,7 +2220,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 4
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
 ; RV64I-NEXT: vand.vx v9, v9, a0
@@ -2243,7 +2243,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; RV32D-NEXT: vnsrl.wi v12, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v12, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v12
 ; RV32D-NEXT: li a0, 134
 ; RV32D-NEXT: vrsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -2256,7 +2256,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; RV64D-NEXT: vnsrl.wi v12, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v12, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v12
 ; RV64D-NEXT: li a0, 134
 ; RV64D-NEXT: vrsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -2274,7 +2274,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v10
 ; RV32I-NEXT: vsrl.vi v10, v8, 4
 ; RV32I-NEXT: vor.vv v8, v8, v10
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v10, v8, 1
 ; RV32I-NEXT: li a0, 85
 ; RV32I-NEXT: vand.vx v10, v10, a0
@@ -2298,7 +2298,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v10
 ; RV64I-NEXT: vsrl.vi v10, v8, 4
 ; RV64I-NEXT: vor.vv v8, v8, v10
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v10, v8, 1
 ; RV64I-NEXT: li a0, 85
 ; RV64I-NEXT: vand.vx v10, v10, a0
@@ -2321,7 +2321,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e16, m4, ta, mu
 ; RV32D-NEXT: vnsrl.wi v16, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v16, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v16
 ; RV32D-NEXT: li a0, 134
 ; RV32D-NEXT: vrsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -2334,7 +2334,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e16, m4, ta, mu
 ; RV64D-NEXT: vnsrl.wi v16, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v16, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v16
 ; RV64D-NEXT: li a0, 134
 ; RV64D-NEXT: vrsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -2352,7 +2352,7 @@
 ; CHECK-NEXT: vor.vv v8, v8, v12
 ; CHECK-NEXT: vsrl.vi v12, v8, 4
 ; CHECK-NEXT: vor.vv v8, v8, v12
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: vsrl.vi v12, v8, 1
 ; CHECK-NEXT: li a0, 85
 ; CHECK-NEXT: vand.vx v12, v12, a0
@@ -2380,7 +2380,7 @@
 ; CHECK-NEXT: vor.vv v8, v8, v16
 ; CHECK-NEXT: vsrl.vi v16, v8, 4
 ; CHECK-NEXT: vor.vv v8, v8, v16
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: vsrl.vi v16, v8, 1
 ; CHECK-NEXT: li a0, 85
 ; CHECK-NEXT: vand.vx v16, v16, a0
@@ -2410,7 +2410,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 8
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 5
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -2443,7 +2443,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 8
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 5
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -2498,7 +2498,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 8
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 5
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -2531,7 +2531,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 8
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 5
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -2586,7 +2586,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 8
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 5
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -2619,7 +2619,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 8
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 5
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -2674,7 +2674,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v10
 ; RV32I-NEXT: vsrl.vi v10, v8, 8
 ; RV32I-NEXT: vor.vv v8, v8, v10
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v10, v8, 1
 ; RV32I-NEXT: lui a0, 5
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -2707,7 +2707,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v10
 ; RV64I-NEXT: vsrl.vi v10, v8, 8
 ; RV64I-NEXT: vor.vv v8, v8, v10
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v10, v8, 1
 ; RV64I-NEXT: lui a0, 5
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -2762,7 +2762,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v12
 ; RV32I-NEXT: vsrl.vi v12, v8, 8
 ; RV32I-NEXT: vor.vv v8, v8, v12
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v12, v8, 1
 ; RV32I-NEXT: lui a0, 5
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -2795,7 +2795,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v12
 ; RV64I-NEXT: vsrl.vi v12, v8, 8
 ; RV64I-NEXT: vor.vv v8, v8, v12
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v12, v8, 1
 ; RV64I-NEXT: lui a0, 5
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -2850,7 +2850,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v16
 ; RV32-NEXT: vsrl.vi v16, v8, 8
 ; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: vsrl.vi v16, v8, 1
 ; RV32-NEXT: lui a0, 5
 ; RV32-NEXT: addi a0, a0, 1365
@@ -2883,7 +2883,7 @@
 ; RV64-NEXT: vor.vv v8, v8, v16
 ; RV64-NEXT: vsrl.vi v16, v8, 8
 ; RV64-NEXT: vor.vv v8, v8, v16
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vsrl.vi v16, v8, 1
 ; RV64-NEXT: lui a0, 5
 ; RV64-NEXT: addiw a0, a0, 1365
@@ -2922,7 +2922,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 16
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 349525
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -2958,7 +2958,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 16
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 349525
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -2989,7 +2989,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; RV32D-NEXT: vsrl.vx v8, v9, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 1054
 ; RV32D-NEXT: vrsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -3002,7 +3002,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; RV64D-NEXT: vsrl.vx v8, v9, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 1054
 ; RV64D-NEXT: vrsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -3024,7 +3024,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 16
 ; RV32I-NEXT: vor.vv v8, v8, v9
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 349525
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -3060,7 +3060,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 16
 ; RV64I-NEXT: vor.vv v8, v8, v9
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 349525
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -3091,7 +3091,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; RV32D-NEXT: vsrl.vx v8, v10, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v10, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v10, v8
 ; RV32D-NEXT: li a0, 1054
 ; RV32D-NEXT: vrsub.vx v8, v10, a0
 ; RV32D-NEXT: ret
@@ -3104,7 +3104,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; RV64D-NEXT: vsrl.vx v8, v10, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v10, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v10, v8
 ; RV64D-NEXT: li a0, 1054
 ; RV64D-NEXT: vrsub.vx v8, v10, a0
 ; RV64D-NEXT: ret
@@ -3126,7 +3126,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v10
 ; RV32I-NEXT: vsrl.vi v10, v8, 16
 ; RV32I-NEXT: vor.vv v8, v8, v10
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v10, v8, 1
 ; RV32I-NEXT: lui a0, 349525
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -3162,7 +3162,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v10
 ; RV64I-NEXT: vsrl.vi v10, v8, 16
 ; RV64I-NEXT: vor.vv v8, v8, v10
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v10, v8, 1
 ; RV64I-NEXT: lui a0, 349525
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -3193,7 +3193,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; RV32D-NEXT: vsrl.vx v8, v12, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v12, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v12, v8
 ; RV32D-NEXT: li a0, 1054
 ; RV32D-NEXT: vrsub.vx v8, v12, a0
 ; RV32D-NEXT: ret
@@ -3206,7 +3206,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; RV64D-NEXT: vsrl.vx v8, v12, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v12, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v12, v8
 ; RV64D-NEXT: li a0, 1054
 ; RV64D-NEXT: vrsub.vx v8, v12, a0
 ; RV64D-NEXT: ret
@@ -3228,7 +3228,7 @@
 ; RV32I-NEXT: vor.vv v8, v8, v12
 ; RV32I-NEXT: vsrl.vi v12, v8, 16
 ; RV32I-NEXT: vor.vv v8, v8, v12
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vsrl.vi v12, v8, 1
 ; RV32I-NEXT: lui a0, 349525
 ; RV32I-NEXT: addi a0, a0, 1365
@@ -3264,7 +3264,7 @@
 ; RV64I-NEXT: vor.vv v8, v8, v12
 ; RV64I-NEXT: vsrl.vi v12, v8, 16
 ; RV64I-NEXT: vor.vv v8, v8, v12
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vsrl.vi v12, v8, 1
 ; RV64I-NEXT: lui a0, 349525
 ; RV64I-NEXT: addiw a0, a0, 1365
@@ -3295,7 +3295,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32D-NEXT: vsrl.vx v8, v16, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT: vnsrl.wx v16, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v16, v8
 ; RV32D-NEXT: li a0, 1054
 ; RV32D-NEXT: vrsub.vx v8, v16, a0
 ; RV32D-NEXT: ret
@@ -3308,7 +3308,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV64D-NEXT: vsrl.vx v8, v16, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT: vnsrl.wx v16, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v16, v8
 ; RV64D-NEXT: li a0, 1054
 ; RV64D-NEXT: vrsub.vx v8, v16, a0
 ; RV64D-NEXT: ret
@@ -3330,7 +3330,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v16
 ; RV32-NEXT: vsrl.vi v16, v8, 16
 ; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: vsrl.vi v16, v8, 1
 ; RV32-NEXT: lui a0, 349525
 ; RV32-NEXT: addi a0, a0, 1365
@@ -3366,7 +3366,7 @@
 ; RV64-NEXT: vor.vv v8, v8, v16
 ; RV64-NEXT: vsrl.vi v16, v8, 16
 ; RV64-NEXT: vor.vv v8, v8, v16
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vsrl.vi v16, v8, 1
 ; RV64-NEXT: lui a0, 349525
 ; RV64-NEXT: addiw a0, a0, 1365
@@ -3429,7 +3429,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v9
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vsrl.vi v11, v8, 1
@@ -3468,7 +3468,7 @@
 ; RV64-NEXT: li a0, 32
 ; RV64-NEXT: vsrl.vx v9, v8, a0
 ; RV64-NEXT: vor.vv v8, v8, v9
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: lui a0, %hi(.LCPI40_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI40_0)(a0)
 ; RV64-NEXT: lui a1, %hi(.LCPI40_1)
@@ -3532,7 +3532,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v10
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vsrl.vi v14, v8, 1
@@ -3571,7 +3571,7 @@
 ; RV64-NEXT: li a0, 32
 ; RV64-NEXT: vsrl.vx v10, v8, a0
 ; RV64-NEXT: vor.vv v8, v8, v10
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: lui a0, %hi(.LCPI41_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI41_0)(a0)
 ; RV64-NEXT: lui a1, %hi(.LCPI41_1)
@@ -3635,7 +3635,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v12
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vsrl.vi v20, v8, 1
@@ -3674,7 +3674,7 @@
 ; RV64-NEXT: li a0, 32
 ; RV64-NEXT: vsrl.vx v12, v8, a0
 ; RV64-NEXT: vor.vv v8, v8, v12
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: lui a0, %hi(.LCPI42_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI42_0)(a0)
 ; RV64-NEXT: lui a1, %hi(.LCPI42_1)
@@ -3738,7 +3738,7 @@
 ; RV32-NEXT: vor.vv v8, v8, v16
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v24, (a0), zero
 ; RV32-NEXT: vsrl.vi v0, v8, 1
@@ -3777,7 +3777,7 @@
 ; RV64-NEXT: li a0, 32
 ; RV64-NEXT: vsrl.vx v16, v8, a0
 ; RV64-NEXT: vor.vv v8, v8, v16
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: lui a0, %hi(.LCPI43_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI43_0)(a0)
 ; RV64-NEXT: lui a1, %hi(.LCPI43_1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
@@ -10,7 +10,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
@@ -31,7 +31,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
@@ -60,7 +60,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
 ; RV32D-NEXT: vnsrl.wi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: vmerge.vim v8, v8, 8, v0
@@ -79,7 +79,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
 ; RV64D-NEXT: vnsrl.wi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: vmerge.vim v8, v8, 8, v0
@@ -95,7 +95,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
@@ -116,7 +116,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
@@ -145,7 +145,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; RV32D-NEXT: vnsrl.wi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: vmerge.vim v8, v8, 8, v0
@@ -164,7 +164,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; RV64D-NEXT: vnsrl.wi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: vmerge.vim v8, v8, 8, v0
@@ -180,7 +180,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
@@ -201,7 +201,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
@@ -230,7 +230,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; RV32D-NEXT: vnsrl.wi v10, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v10, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v10
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: vmerge.vim v8, v8, 8, v0
@@ -249,7 +249,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; RV64D-NEXT: vnsrl.wi v10, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v10, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v10
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: vmerge.vim v8, v8, 8, v0
@@ -265,7 +265,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e8, m1, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
@@ -286,7 +286,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e8, m1, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
@@ -315,7 +315,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; RV32D-NEXT: vnsrl.wi v12, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v12, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v12
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: vmerge.vim v8, v8, 8, v0
@@ -334,7 +334,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; RV64D-NEXT: vnsrl.wi v12, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v12, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v12
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: vmerge.vim v8, v8, 8, v0
@@ -350,7 +350,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e8, m2, ta, mu
 ; RV32I-NEXT: vsub.vx v10, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v10
 ; RV32I-NEXT: vsrl.vi v10, v8, 1
 ; RV32I-NEXT: li a0, 85
@@ -371,7 +371,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e8, m2, ta, mu
 ; RV64I-NEXT: vsub.vx v10, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v10
 ; RV64I-NEXT: vsrl.vi v10, v8, 1
 ; RV64I-NEXT: li a0, 85
@@ -400,7 +400,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e16, m4, ta, mu
 ; RV32D-NEXT: vnsrl.wi v16, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v16, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v16
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: vmerge.vim v8, v8, 8, v0
@@ -419,7 +419,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e16, m4, ta, mu
 ; RV64D-NEXT: vnsrl.wi v16, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v16, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v16
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: vmerge.vim v8, v8, 8, v0
@@ -435,7 +435,7 @@
 ; CHECK-NEXT: li a0, 1
 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT: vsub.vx v12, v8, a0
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: vand.vv v8, v8, v12
 ; CHECK-NEXT: vsrl.vi v12, v8, 1
 ; CHECK-NEXT: li a0, 85
@@ -461,7 +461,7 @@
 ; CHECK-NEXT: li a0, 1
 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT: vsub.vx v16, v8, a0
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
 ; CHECK-NEXT: vand.vv v8, v8, v16
 ; CHECK-NEXT: vsrl.vi v16, v8, 1
 ; CHECK-NEXT: li a0, 85
@@ -487,7 +487,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 5
@@ -515,7 +515,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 5
@@ -578,7 +578,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 5
@@ -606,7 +606,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 5
@@ -669,7 +669,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e16, m1, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 5
@@ -697,7 +697,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e16, m1, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 5
@@ -760,7 +760,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e16, m2, ta, mu
 ; RV32I-NEXT: vsub.vx v10, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v10
 ; RV32I-NEXT: vsrl.vi v10, v8, 1
 ; RV32I-NEXT: lui a0, 5
@@ -788,7 +788,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e16, m2, ta, mu
 ; RV64I-NEXT: vsub.vx v10, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v10
 ; RV64I-NEXT: vsrl.vi v10, v8, 1
 ; RV64I-NEXT: lui a0, 5
@@ -851,7 +851,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e16, m4, ta, mu
 ; RV32I-NEXT: vsub.vx v12, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v12
 ; RV32I-NEXT: vsrl.vi v12, v8, 1
 ; RV32I-NEXT: lui a0, 5
@@ -879,7 +879,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e16, m4, ta, mu
 ; RV64I-NEXT: vsub.vx v12, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v12
 ; RV64I-NEXT: vsrl.vi v12, v8, 1
 ; RV64I-NEXT: lui a0, 5
@@ -942,7 +942,7 @@
 ; RV32-NEXT: li a0, 1
 ; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, mu
 ; RV32-NEXT: vsub.vx v16, v8, a0
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: vand.vv v8, v8, v16
 ; RV32-NEXT: vsrl.vi v16, v8, 1
 ; RV32-NEXT: lui a0, 5
@@ -970,7 +970,7 @@
 ; RV64-NEXT: li a0, 1
 ; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, mu
 ; RV64-NEXT: vsub.vx v16, v8, a0
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vand.vv v8, v8, v16
 ; RV64-NEXT: vsrl.vi v16, v8, 1
 ; RV64-NEXT: lui a0, 5
@@ -1003,7 +1003,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 349525
@@ -1032,7 +1032,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 349525
@@ -1066,7 +1066,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; RV32D-NEXT: vsrl.vx v9, v10, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v9, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v9
 ; RV32D-NEXT: li a0, 1023
 ; RV32D-NEXT: vsub.vx v9, v9, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -1086,7 +1086,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; RV64D-NEXT: vsrl.vx v8, v9, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 1023
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: li a0, 32
@@ -1103,7 +1103,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e32, m1, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: lui a0, 349525
@@ -1132,7 +1132,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e32, m1, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: lui a0, 349525
@@ -1166,7 +1166,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; RV32D-NEXT: vsrl.vx v10, v10, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT: vnsrl.wx v9, v10, zero
+; RV32D-NEXT: vncvt.x.x.w v9, v10
 ; RV32D-NEXT: li a0, 1023
 ; RV32D-NEXT: vsub.vx v9, v9, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -1186,7 +1186,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; RV64D-NEXT: vsrl.vx v8, v10, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT: vnsrl.wx v10, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v10, v8
 ; RV64D-NEXT: li a0, 1023
 ; RV64D-NEXT: vsub.vx v8, v10, a0
 ; RV64D-NEXT: li a0, 32
@@ -1203,7 +1203,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e32, m2, ta, mu
 ; RV32I-NEXT: vsub.vx v10, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v10
 ; RV32I-NEXT: vsrl.vi v10, v8, 1
 ; RV32I-NEXT: lui a0, 349525
@@ -1232,7 +1232,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e32, m2, ta, mu
 ; RV64I-NEXT: vsub.vx v10, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v10
 ; RV64I-NEXT: vsrl.vi v10, v8, 1
 ; RV64I-NEXT: lui a0, 349525
@@ -1266,7 +1266,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; RV32D-NEXT: vsrl.vx v12, v12, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT: vnsrl.wx v10, v12, zero
+; RV32D-NEXT: vncvt.x.x.w v10, v12
 ; RV32D-NEXT: li a0, 1023
 ; RV32D-NEXT: vsub.vx v10, v10, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -1286,7 +1286,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; RV64D-NEXT: vsrl.vx v8, v12, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT: vnsrl.wx v12, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v12, v8
 ; RV64D-NEXT: li a0, 1023
 ; RV64D-NEXT: vsub.vx v8, v12, a0
 ; RV64D-NEXT: li a0, 32
@@ -1303,7 +1303,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e32, m4, ta, mu
 ; RV32I-NEXT: vsub.vx v12, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v12
 ; RV32I-NEXT: vsrl.vi v12, v8, 1
 ; RV32I-NEXT: lui a0, 349525
@@ -1332,7 +1332,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e32, m4, ta, mu
 ; RV64I-NEXT: vsub.vx v12, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v12
 ; RV64I-NEXT: vsrl.vi v12, v8, 1
 ; RV64I-NEXT: lui a0, 349525
@@ -1366,7 +1366,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV32D-NEXT: vsrl.vx v16, v16, a0
 ; RV32D-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT: vnsrl.wx v12, v16, zero
+; RV32D-NEXT: vncvt.x.x.w v12, v16
 ; RV32D-NEXT: li a0, 1023
 ; RV32D-NEXT: vsub.vx v12, v12, a0
 ; RV32D-NEXT: vmseq.vi v0, v8, 0
@@ -1386,7 +1386,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e64, m8, ta, mu
 ; RV64D-NEXT: vsrl.vx v8, v16, a0
 ; RV64D-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT: vnsrl.wx v16, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v16, v8
 ; RV64D-NEXT: li a0, 1023
 ; RV64D-NEXT: vsub.vx v8, v16, a0
 ; RV64D-NEXT: li a0, 32
@@ -1403,7 +1403,7 @@
 ; RV32-NEXT: li a0, 1
 ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu
 ; RV32-NEXT: vsub.vx v16, v8, a0
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: vand.vv v8, v8, v16
 ; RV32-NEXT: vsrl.vi v16, v8, 1
 ; RV32-NEXT: lui a0, 349525
@@ -1432,7 +1432,7 @@
 ; RV64-NEXT: li a0, 1
 ; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu
 ; RV64-NEXT: vsub.vx v16, v8, a0
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vand.vv v8, v8, v16
 ; RV64-NEXT: vsrl.vi v16, v8, 1
 ; RV64-NEXT: lui a0, 349525
@@ -1484,7 +1484,7 @@
 ; RV32-NEXT: li a0, 1
 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu
 ; RV32-NEXT: vsub.vx v9, v8, a0
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vand.vv v8, v8, v9
@@ -1515,7 +1515,7 @@
 ; RV64-NEXT: li a0, 1
 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT: vsub.vx v9, v8, a0
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vand.vv v8, v8, v9
 ; RV64-NEXT: lui a0, %hi(.LCPI18_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI18_0)(a0)
@@ -1568,7 +1568,7 @@
 ; RV32-NEXT: li a0, 1
 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu
 ; RV32-NEXT: vsub.vx v10, v8, a0
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vand.vv v8, v8, v10
@@ -1599,7 +1599,7 @@
 ; RV64-NEXT: li a0, 1
 ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
 ; RV64-NEXT: vsub.vx v10, v8, a0
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vand.vv v8, v8, v10
 ; RV64-NEXT: lui a0, %hi(.LCPI19_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI19_0)(a0)
@@ -1652,7 +1652,7 @@
 ; RV32-NEXT: li a0, 1
 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu
 ; RV32-NEXT: vsub.vx v12, v8, a0
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vand.vv v8, v8, v12
@@ -1683,7 +1683,7 @@
 ; RV64-NEXT: li a0, 1
 ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
 ; RV64-NEXT: vsub.vx v12, v8, a0
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vand.vv v8, v8, v12
 ; RV64-NEXT: lui a0, %hi(.LCPI20_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI20_0)(a0)
@@ -1736,7 +1736,7 @@
 ; RV32-NEXT: li a0, 1
 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT: vsub.vx v16, v8, a0
-; RV32-NEXT: vxor.vi v8, v8, -1
+; RV32-NEXT: vnot.v v8, v8
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vlse64.v v24, (a0), zero
 ; RV32-NEXT: vand.vv v8, v8, v16
@@ -1767,7 +1767,7 @@
 ; RV64-NEXT: li a0, 1
 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
 ; RV64-NEXT: vsub.vx v16, v8, a0
-; RV64-NEXT: vxor.vi v8, v8, -1
+; RV64-NEXT: vnot.v v8, v8
 ; RV64-NEXT: vand.vv v8, v8, v16
 ; RV64-NEXT: lui a0, %hi(.LCPI21_0)
 ; RV64-NEXT: ld a0, %lo(.LCPI21_0)(a0)
@@ -1802,7 +1802,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
@@ -1823,7 +1823,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
@@ -1850,7 +1850,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
 ; RV32D-NEXT: vnsrl.wi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -1866,7 +1866,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
 ; RV64D-NEXT: vnsrl.wi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -1880,7 +1880,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
@@ -1901,7 +1901,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
@@ -1928,7 +1928,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; RV32D-NEXT: vnsrl.wi v8, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v8, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v8
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -1944,7 +1944,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; RV64D-NEXT: vnsrl.wi v8, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v8, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v8
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -1958,7 +1958,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
@@ -1979,7 +1979,7 @@
 ; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
 ; RV64I-NEXT: vsub.vx v9, v8, a0
-; RV64I-NEXT: vxor.vi v8, v8, -1
+; RV64I-NEXT: vnot.v v8, v8
 ; RV64I-NEXT: vand.vv v8, v8, v9
 ; RV64I-NEXT: vsrl.vi v9, v8, 1
 ; RV64I-NEXT: li a0, 85
@@ -2006,7 +2006,7 @@
 ; RV32D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; RV32D-NEXT: vnsrl.wi v10, v8, 23
 ; RV32D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; RV32D-NEXT: vnsrl.wx v8, v10, zero
+; RV32D-NEXT: vncvt.x.x.w v8, v10
 ; RV32D-NEXT: li a0, 127
 ; RV32D-NEXT: vsub.vx v8, v8, a0
 ; RV32D-NEXT: ret
@@ -2022,7 +2022,7 @@
 ; RV64D-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; RV64D-NEXT: vnsrl.wi v10, v8, 23
 ; RV64D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; RV64D-NEXT: vnsrl.wx v8, v10, zero
+; RV64D-NEXT: vncvt.x.x.w v8, v10
 ; RV64D-NEXT: li a0, 127
 ; RV64D-NEXT: vsub.vx v8, v8, a0
 ; RV64D-NEXT: ret
@@ -2036,7 +2036,7 @@
 ; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: vsetvli a1, zero, e8, m1, ta, mu
 ; RV32I-NEXT: vsub.vx v9, v8, a0
-; RV32I-NEXT: vxor.vi v8, v8, -1
+; RV32I-NEXT: vnot.v v8, v8
 ; RV32I-NEXT: vand.vv v8, v8, v9
 ; RV32I-NEXT: vsrl.vi v9, v8, 1
 ; RV32I-NEXT: li a0, 85
@@ -2057,7 +2057,7 @@
li a0, 1 ; RV64I-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; RV64I-NEXT: vsub.vx v9, v8, a0 -; RV64I-NEXT: vxor.vi v8, v8, -1 +; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v9 ; RV64I-NEXT: vsrl.vi v9, v8, 1 ; RV64I-NEXT: li a0, 85 @@ -2084,7 +2084,7 @@ ; RV32D-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32D-NEXT: vnsrl.wi v12, v8, 23 ; RV32D-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; RV32D-NEXT: vnsrl.wx v8, v12, zero +; RV32D-NEXT: vncvt.x.x.w v8, v12 ; RV32D-NEXT: li a0, 127 ; RV32D-NEXT: vsub.vx v8, v8, a0 ; RV32D-NEXT: ret @@ -2100,7 +2100,7 @@ ; RV64D-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64D-NEXT: vnsrl.wi v12, v8, 23 ; RV64D-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; RV64D-NEXT: vnsrl.wx v8, v12, zero +; RV64D-NEXT: vncvt.x.x.w v8, v12 ; RV64D-NEXT: li a0, 127 ; RV64D-NEXT: vsub.vx v8, v8, a0 ; RV64D-NEXT: ret @@ -2114,7 +2114,7 @@ ; RV32I-NEXT: li a0, 1 ; RV32I-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; RV32I-NEXT: vsub.vx v10, v8, a0 -; RV32I-NEXT: vxor.vi v8, v8, -1 +; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v10 ; RV32I-NEXT: vsrl.vi v10, v8, 1 ; RV32I-NEXT: li a0, 85 @@ -2135,7 +2135,7 @@ ; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; RV64I-NEXT: vsub.vx v10, v8, a0 -; RV64I-NEXT: vxor.vi v8, v8, -1 +; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v10 ; RV64I-NEXT: vsrl.vi v10, v8, 1 ; RV64I-NEXT: li a0, 85 @@ -2162,7 +2162,7 @@ ; RV32D-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; RV32D-NEXT: vnsrl.wi v16, v8, 23 ; RV32D-NEXT: vsetvli zero, zero, e8, m2, ta, mu -; RV32D-NEXT: vnsrl.wx v8, v16, zero +; RV32D-NEXT: vncvt.x.x.w v8, v16 ; RV32D-NEXT: li a0, 127 ; RV32D-NEXT: vsub.vx v8, v8, a0 ; RV32D-NEXT: ret @@ -2178,7 +2178,7 @@ ; RV64D-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; RV64D-NEXT: vnsrl.wi v16, v8, 23 ; RV64D-NEXT: vsetvli zero, zero, e8, m2, ta, mu -; RV64D-NEXT: vnsrl.wx v8, v16, zero +; RV64D-NEXT: vncvt.x.x.w v8, v16 ; RV64D-NEXT: li a0, 127 ; RV64D-NEXT: vsub.vx v8, v8, a0 ; RV64D-NEXT: ret @@ -2192,7 +2192,7 @@ ; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vsub.vx v12, v8, a0 -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: vsrl.vi v12, v8, 1 ; CHECK-NEXT: li a0, 85 @@ -2217,7 +2217,7 @@ ; CHECK-NEXT: li a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vsub.vx v16, v8, a0 -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: vsrl.vi v16, v8, 1 ; CHECK-NEXT: li a0, 85 @@ -2242,7 +2242,7 @@ ; RV32I-NEXT: li a0, 1 ; RV32I-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; RV32I-NEXT: vsub.vx v9, v8, a0 -; RV32I-NEXT: vxor.vi v8, v8, -1 +; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v9 ; RV32I-NEXT: vsrl.vi v9, v8, 1 ; RV32I-NEXT: lui a0, 5 @@ -2270,7 +2270,7 @@ ; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; RV64I-NEXT: vsub.vx v9, v8, a0 -; RV64I-NEXT: vxor.vi v8, v8, -1 +; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v9 ; RV64I-NEXT: vsrl.vi v9, v8, 1 ; RV64I-NEXT: lui a0, 5 @@ -2324,7 +2324,7 @@ ; RV32I-NEXT: li a0, 1 ; RV32I-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; RV32I-NEXT: vsub.vx v9, v8, a0 -; RV32I-NEXT: vxor.vi v8, v8, -1 +; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v9 ; RV32I-NEXT: vsrl.vi v9, v8, 1 ; RV32I-NEXT: lui a0, 5 @@ -2352,7 +2352,7 @@ ; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; RV64I-NEXT: vsub.vx v9, v8, a0 -; 
RV64I-NEXT: vxor.vi v8, v8, -1 +; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v9 ; RV64I-NEXT: vsrl.vi v9, v8, 1 ; RV64I-NEXT: lui a0, 5 @@ -2406,7 +2406,7 @@ ; RV32I-NEXT: li a0, 1 ; RV32I-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; RV32I-NEXT: vsub.vx v9, v8, a0 -; RV32I-NEXT: vxor.vi v8, v8, -1 +; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v9 ; RV32I-NEXT: vsrl.vi v9, v8, 1 ; RV32I-NEXT: lui a0, 5 @@ -2434,7 +2434,7 @@ ; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; RV64I-NEXT: vsub.vx v9, v8, a0 -; RV64I-NEXT: vxor.vi v8, v8, -1 +; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v9 ; RV64I-NEXT: vsrl.vi v9, v8, 1 ; RV64I-NEXT: lui a0, 5 @@ -2488,7 +2488,7 @@ ; RV32I-NEXT: li a0, 1 ; RV32I-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; RV32I-NEXT: vsub.vx v10, v8, a0 -; RV32I-NEXT: vxor.vi v8, v8, -1 +; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v10 ; RV32I-NEXT: vsrl.vi v10, v8, 1 ; RV32I-NEXT: lui a0, 5 @@ -2516,7 +2516,7 @@ ; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; RV64I-NEXT: vsub.vx v10, v8, a0 -; RV64I-NEXT: vxor.vi v8, v8, -1 +; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v10 ; RV64I-NEXT: vsrl.vi v10, v8, 1 ; RV64I-NEXT: lui a0, 5 @@ -2570,7 +2570,7 @@ ; RV32I-NEXT: li a0, 1 ; RV32I-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; RV32I-NEXT: vsub.vx v12, v8, a0 -; RV32I-NEXT: vxor.vi v8, v8, -1 +; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v12 ; RV32I-NEXT: vsrl.vi v12, v8, 1 ; RV32I-NEXT: lui a0, 5 @@ -2598,7 +2598,7 @@ ; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; RV64I-NEXT: vsub.vx v12, v8, a0 -; RV64I-NEXT: vxor.vi v8, v8, -1 +; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v12 ; RV64I-NEXT: vsrl.vi v12, v8, 1 ; RV64I-NEXT: lui a0, 5 @@ -2652,7 +2652,7 @@ ; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; RV32-NEXT: vsub.vx v16, v8, a0 -; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vnot.v v8, v8 ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: vsrl.vi v16, v8, 1 ; RV32-NEXT: lui a0, 5 @@ -2680,7 +2680,7 @@ ; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; RV64-NEXT: vsub.vx v16, v8, a0 -; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vnot.v v8, v8 ; RV64-NEXT: vand.vv v8, v8, v16 ; RV64-NEXT: vsrl.vi v16, v8, 1 ; RV64-NEXT: lui a0, 5 @@ -2712,7 +2712,7 @@ ; RV32I-NEXT: li a0, 1 ; RV32I-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; RV32I-NEXT: vsub.vx v9, v8, a0 -; RV32I-NEXT: vxor.vi v8, v8, -1 +; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v9 ; RV32I-NEXT: vsrl.vi v9, v8, 1 ; RV32I-NEXT: lui a0, 349525 @@ -2741,7 +2741,7 @@ ; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; RV64I-NEXT: vsub.vx v9, v8, a0 -; RV64I-NEXT: vxor.vi v8, v8, -1 +; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v9 ; RV64I-NEXT: vsrl.vi v9, v8, 1 ; RV64I-NEXT: lui a0, 349525 @@ -2775,7 +2775,7 @@ ; RV32D-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV32D-NEXT: vsrl.vx v8, v9, a0 ; RV32D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; RV32D-NEXT: vnsrl.wx v8, v8, zero +; RV32D-NEXT: vncvt.x.x.w v8, v8 ; RV32D-NEXT: li a0, 1023 ; RV32D-NEXT: vsub.vx v8, v8, a0 ; RV32D-NEXT: ret @@ -2790,7 +2790,7 @@ ; RV64D-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV64D-NEXT: vsrl.vx v8, v9, a0 ; RV64D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu -; RV64D-NEXT: vnsrl.wx v8, v8, zero +; RV64D-NEXT: vncvt.x.x.w v8, v8 ; RV64D-NEXT: li a0, 1023 ; RV64D-NEXT: vsub.vx v8, v8, a0 ; RV64D-NEXT: ret @@ 
-2804,7 +2804,7 @@ ; RV32I-NEXT: li a0, 1 ; RV32I-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; RV32I-NEXT: vsub.vx v9, v8, a0 -; RV32I-NEXT: vxor.vi v8, v8, -1 +; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v9 ; RV32I-NEXT: vsrl.vi v9, v8, 1 ; RV32I-NEXT: lui a0, 349525 @@ -2833,7 +2833,7 @@ ; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; RV64I-NEXT: vsub.vx v9, v8, a0 -; RV64I-NEXT: vxor.vi v8, v8, -1 +; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v9 ; RV64I-NEXT: vsrl.vi v9, v8, 1 ; RV64I-NEXT: lui a0, 349525 @@ -2867,7 +2867,7 @@ ; RV32D-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32D-NEXT: vsrl.vx v8, v10, a0 ; RV32D-NEXT: vsetvli zero, zero, e32, m1, ta, mu -; RV32D-NEXT: vnsrl.wx v10, v8, zero +; RV32D-NEXT: vncvt.x.x.w v10, v8 ; RV32D-NEXT: li a0, 1023 ; RV32D-NEXT: vsub.vx v8, v10, a0 ; RV32D-NEXT: ret @@ -2882,7 +2882,7 @@ ; RV64D-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV64D-NEXT: vsrl.vx v8, v10, a0 ; RV64D-NEXT: vsetvli zero, zero, e32, m1, ta, mu -; RV64D-NEXT: vnsrl.wx v10, v8, zero +; RV64D-NEXT: vncvt.x.x.w v10, v8 ; RV64D-NEXT: li a0, 1023 ; RV64D-NEXT: vsub.vx v8, v10, a0 ; RV64D-NEXT: ret @@ -2896,7 +2896,7 @@ ; RV32I-NEXT: li a0, 1 ; RV32I-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; RV32I-NEXT: vsub.vx v10, v8, a0 -; RV32I-NEXT: vxor.vi v8, v8, -1 +; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v10 ; RV32I-NEXT: vsrl.vi v10, v8, 1 ; RV32I-NEXT: lui a0, 349525 @@ -2925,7 +2925,7 @@ ; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; RV64I-NEXT: vsub.vx v10, v8, a0 -; RV64I-NEXT: vxor.vi v8, v8, -1 +; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v10 ; RV64I-NEXT: vsrl.vi v10, v8, 1 ; RV64I-NEXT: lui a0, 349525 @@ -2959,7 +2959,7 @@ ; RV32D-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32D-NEXT: vsrl.vx v8, v12, a0 ; RV32D-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV32D-NEXT: vnsrl.wx v12, v8, zero +; RV32D-NEXT: vncvt.x.x.w v12, v8 ; RV32D-NEXT: li a0, 1023 ; RV32D-NEXT: vsub.vx v8, v12, a0 ; RV32D-NEXT: ret @@ -2974,7 +2974,7 @@ ; RV64D-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV64D-NEXT: vsrl.vx v8, v12, a0 ; RV64D-NEXT: vsetvli zero, zero, e32, m2, ta, mu -; RV64D-NEXT: vnsrl.wx v12, v8, zero +; RV64D-NEXT: vncvt.x.x.w v12, v8 ; RV64D-NEXT: li a0, 1023 ; RV64D-NEXT: vsub.vx v8, v12, a0 ; RV64D-NEXT: ret @@ -2988,7 +2988,7 @@ ; RV32I-NEXT: li a0, 1 ; RV32I-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32I-NEXT: vsub.vx v12, v8, a0 -; RV32I-NEXT: vxor.vi v8, v8, -1 +; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v12 ; RV32I-NEXT: vsrl.vi v12, v8, 1 ; RV32I-NEXT: lui a0, 349525 @@ -3017,7 +3017,7 @@ ; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV64I-NEXT: vsub.vx v12, v8, a0 -; RV64I-NEXT: vxor.vi v8, v8, -1 +; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v12 ; RV64I-NEXT: vsrl.vi v12, v8, 1 ; RV64I-NEXT: lui a0, 349525 @@ -3051,7 +3051,7 @@ ; RV32D-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32D-NEXT: vsrl.vx v8, v16, a0 ; RV32D-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32D-NEXT: vnsrl.wx v16, v8, zero +; RV32D-NEXT: vncvt.x.x.w v16, v8 ; RV32D-NEXT: li a0, 1023 ; RV32D-NEXT: vsub.vx v8, v16, a0 ; RV32D-NEXT: ret @@ -3066,7 +3066,7 @@ ; RV64D-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV64D-NEXT: vsrl.vx v8, v16, a0 ; RV64D-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV64D-NEXT: vnsrl.wx v16, v8, zero +; RV64D-NEXT: vncvt.x.x.w v16, v8 ; RV64D-NEXT: li a0, 1023 ; RV64D-NEXT: vsub.vx v8, v16, a0 ; RV64D-NEXT: ret @@ 
-3080,7 +3080,7 @@ ; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; RV32-NEXT: vsub.vx v16, v8, a0 -; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vnot.v v8, v8 ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: vsrl.vi v16, v8, 1 ; RV32-NEXT: lui a0, 349525 @@ -3109,7 +3109,7 @@ ; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; RV64-NEXT: vsub.vx v16, v8, a0 -; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vnot.v v8, v8 ; RV64-NEXT: vand.vv v8, v8, v16 ; RV64-NEXT: vsrl.vi v16, v8, 1 ; RV64-NEXT: lui a0, 349525 @@ -3160,7 +3160,7 @@ ; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV32-NEXT: vsub.vx v9, v8, a0 -; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vnot.v v8, v8 ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v9 @@ -3191,7 +3191,7 @@ ; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vsub.vx v9, v8, a0 -; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vnot.v v8, v8 ; RV64-NEXT: vand.vv v8, v8, v9 ; RV64-NEXT: lui a0, %hi(.LCPI40_0) ; RV64-NEXT: ld a0, %lo(.LCPI40_0)(a0) @@ -3243,7 +3243,7 @@ ; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV32-NEXT: vsub.vx v10, v8, a0 -; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vnot.v v8, v8 ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v10 @@ -3274,7 +3274,7 @@ ; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vsub.vx v10, v8, a0 -; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vnot.v v8, v8 ; RV64-NEXT: vand.vv v8, v8, v10 ; RV64-NEXT: lui a0, %hi(.LCPI41_0) ; RV64-NEXT: ld a0, %lo(.LCPI41_0)(a0) @@ -3326,7 +3326,7 @@ ; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV32-NEXT: vsub.vx v12, v8, a0 -; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vnot.v v8, v8 ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v12 @@ -3357,7 +3357,7 @@ ; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vsub.vx v12, v8, a0 -; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vnot.v v8, v8 ; RV64-NEXT: vand.vv v8, v8, v12 ; RV64-NEXT: lui a0, %hi(.LCPI42_0) ; RV64-NEXT: ld a0, %lo(.LCPI42_0)(a0) @@ -3409,7 +3409,7 @@ ; RV32-NEXT: li a0, 1 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsub.vx v16, v8, a0 -; RV32-NEXT: vxor.vi v8, v8, -1 +; RV32-NEXT: vnot.v v8, v8 ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v16 @@ -3440,7 +3440,7 @@ ; RV64-NEXT: li a0, 1 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsub.vx v16, v8, a0 -; RV64-NEXT: vxor.vi v8, v8, -1 +; RV64-NEXT: vnot.v v8, v8 ; RV64-NEXT: vand.vv v8, v8, v16 ; RV64-NEXT: lui a0, %hi(.LCPI43_0) ; RV64-NEXT: ld a0, %lo(.LCPI43_0)(a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll @@ -410,7 +410,7 @@ ; CHECK-LABEL: truncstore_nxv1i16_nxv1i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -474,7 +474,7 @@ ; CHECK-LABEL: truncstore_nxv2i16_nxv2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; 
CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -538,7 +538,7 @@ ; CHECK-LABEL: truncstore_nxv4i16_nxv4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -598,7 +598,7 @@ ; CHECK-LABEL: truncstore_nxv8i16_nxv8i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v10, v8, zero +; CHECK-NEXT: vncvt.x.x.w v10, v8 ; CHECK-NEXT: vs1r.v v10, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -658,7 +658,7 @@ ; CHECK-LABEL: truncstore_nxv16i16_nxv16i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v12, v8, zero +; CHECK-NEXT: vncvt.x.x.w v12, v8 ; CHECK-NEXT: vs2r.v v12, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -694,7 +694,7 @@ ; CHECK-LABEL: truncstore_nxv32i16_nxv32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu -; CHECK-NEXT: vnsrl.wx v16, v8, zero +; CHECK-NEXT: vncvt.x.x.w v16, v8 ; CHECK-NEXT: vs4r.v v16, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -706,9 +706,9 @@ ; CHECK-LABEL: truncstore_nxv1i32_nxv1i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -720,7 +720,7 @@ ; CHECK-LABEL: truncstore_nxv1i32_nxv1i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -758,9 +758,9 @@ ; CHECK-LABEL: truncstore_nxv2i32_nxv2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -772,7 +772,7 @@ ; CHECK-LABEL: truncstore_nxv2i32_nxv2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -808,9 +808,9 @@ ; CHECK-LABEL: truncstore_nxv4i32_nxv4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v10, v8, zero +; CHECK-NEXT: vncvt.x.x.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v10, zero +; CHECK-NEXT: vncvt.x.x.w v8, v10 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -822,7 +822,7 @@ ; CHECK-LABEL: truncstore_nxv4i32_nxv4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v10, v8, zero +; CHECK-NEXT: vncvt.x.x.w v10, v8 ; CHECK-NEXT: vs1r.v v10, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -858,9 +858,9 @@ ; CHECK-LABEL: truncstore_nxv8i32_nxv8i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v12, v8, zero +; CHECK-NEXT: vncvt.x.x.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v12, zero +; CHECK-NEXT: vncvt.x.x.w v8, v12 ; CHECK-NEXT: vs1r.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -872,7 +872,7 @@ ; CHECK-LABEL: truncstore_nxv8i32_nxv8i16: ; CHECK: # 
%bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v12, v8, zero +; CHECK-NEXT: vncvt.x.x.w v12, v8 ; CHECK-NEXT: vs2r.v v12, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -908,9 +908,9 @@ ; CHECK-LABEL: truncstore_nxv16i32_nxv16i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu -; CHECK-NEXT: vnsrl.wx v16, v8, zero +; CHECK-NEXT: vncvt.x.x.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v16, zero +; CHECK-NEXT: vncvt.x.x.w v8, v16 ; CHECK-NEXT: vs2r.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -922,7 +922,7 @@ ; CHECK-LABEL: truncstore_nxv16i32_nxv16i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu -; CHECK-NEXT: vnsrl.wx v16, v8, zero +; CHECK-NEXT: vncvt.x.x.w v16, v8 ; CHECK-NEXT: vs4r.v v16, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -934,11 +934,11 @@ ; CHECK-LABEL: truncstore_nxv1i64_nxv1i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -950,9 +950,9 @@ ; CHECK-LABEL: truncstore_nxv1i64_nxv1i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -964,7 +964,7 @@ ; CHECK-LABEL: truncstore_nxv1i64_nxv1i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -976,11 +976,11 @@ ; CHECK-LABEL: truncstore_nxv2i64_nxv2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v10, v8, zero +; CHECK-NEXT: vncvt.x.x.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v10, zero +; CHECK-NEXT: vncvt.x.x.w v8, v10 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -992,9 +992,9 @@ ; CHECK-LABEL: truncstore_nxv2i64_nxv2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v10, v8, zero +; CHECK-NEXT: vncvt.x.x.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v10, zero +; CHECK-NEXT: vncvt.x.x.w v8, v10 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -1006,7 +1006,7 @@ ; CHECK-LABEL: truncstore_nxv2i64_nxv2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v10, v8, zero +; CHECK-NEXT: vncvt.x.x.w v10, v8 ; CHECK-NEXT: vs1r.v v10, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -1018,11 +1018,11 @@ ; CHECK-LABEL: truncstore_nxv4i64_nxv4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v12, v8, zero +; CHECK-NEXT: vncvt.x.x.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v12, zero +; CHECK-NEXT: vncvt.x.x.w v8, v12 ; CHECK-NEXT: vsetvli zero, 
zero, e8, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -1034,9 +1034,9 @@ ; CHECK-LABEL: truncstore_nxv4i64_nxv4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v12, v8, zero +; CHECK-NEXT: vncvt.x.x.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v12, zero +; CHECK-NEXT: vncvt.x.x.w v8, v12 ; CHECK-NEXT: vs1r.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -1048,7 +1048,7 @@ ; CHECK-LABEL: truncstore_nxv4i64_nxv4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v12, v8, zero +; CHECK-NEXT: vncvt.x.x.w v12, v8 ; CHECK-NEXT: vs2r.v v12, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -1060,11 +1060,11 @@ ; CHECK-LABEL: truncstore_nxv8i64_nxv8i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu -; CHECK-NEXT: vnsrl.wx v16, v8, zero +; CHECK-NEXT: vncvt.x.x.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v16, zero +; CHECK-NEXT: vncvt.x.x.w v8, v16 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v10, v8, zero +; CHECK-NEXT: vncvt.x.x.w v10, v8 ; CHECK-NEXT: vs1r.v v10, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -1076,9 +1076,9 @@ ; CHECK-LABEL: truncstore_nxv8i64_nxv8i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu -; CHECK-NEXT: vnsrl.wx v16, v8, zero +; CHECK-NEXT: vncvt.x.x.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v16, zero +; CHECK-NEXT: vncvt.x.x.w v8, v16 ; CHECK-NEXT: vs2r.v v8, (a0) ; CHECK-NEXT: ret %y = trunc %x to @@ -1090,7 +1090,7 @@ ; CHECK-LABEL: truncstore_nxv8i64_nxv8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu -; CHECK-NEXT: vnsrl.wx v16, v8, zero +; CHECK-NEXT: vncvt.x.x.w v16, v8 ; CHECK-NEXT: vs4r.v v16, (a0) ; CHECK-NEXT: ret %y = trunc %x to diff --git a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll @@ -17,7 +17,7 @@ ; CHECK-NEXT: flh ft1, %lo(.LCPI0_1)(a0) ; CHECK-NEXT: vfadd.vf v10, v9, ft0 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0 -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft1 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 @@ -40,7 +40,7 @@ ; CHECK-NEXT: flh ft1, %lo(.LCPI1_1)(a0) ; CHECK-NEXT: vfadd.vf v10, v9, ft0 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0 -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft1 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 @@ -63,7 +63,7 @@ ; CHECK-NEXT: flh ft1, %lo(.LCPI2_1)(a0) ; CHECK-NEXT: vfadd.vf v10, v9, ft0 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0 -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft1 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 @@ -86,7 +86,7 @@ ; CHECK-NEXT: flh ft1, %lo(.LCPI3_1)(a0) ; CHECK-NEXT: vfadd.vf v12, v10, ft0 ; CHECK-NEXT: vmerge.vvm v10, v10, v12, v0 -; CHECK-NEXT: vfsgnjx.vv v12, v8, v8 +; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft1 ; CHECK-NEXT: vfsgnj.vv v10, v10, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 @@ -109,7 +109,7 @@ ; CHECK-NEXT: flh ft1, %lo(.LCPI4_1)(a0) ; CHECK-NEXT: vfadd.vf 
v16, v12, ft0 ; CHECK-NEXT: vmerge.vvm v12, v12, v16, v0 -; CHECK-NEXT: vfsgnjx.vv v16, v8, v8 +; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft1 ; CHECK-NEXT: vfsgnj.vv v12, v12, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 @@ -132,7 +132,7 @@ ; CHECK-NEXT: flh ft1, %lo(.LCPI5_1)(a0) ; CHECK-NEXT: vfadd.vf v24, v16, ft0 ; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0 -; CHECK-NEXT: vfsgnjx.vv v24, v8, v8 +; CHECK-NEXT: vfabs.v v24, v8 ; CHECK-NEXT: vmflt.vf v0, v24, ft1 ; CHECK-NEXT: vfsgnj.vv v16, v16, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 @@ -155,7 +155,7 @@ ; CHECK-NEXT: flw ft1, %lo(.LCPI6_1)(a0) ; CHECK-NEXT: vfadd.vf v10, v9, ft0 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0 -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft1 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 @@ -178,7 +178,7 @@ ; CHECK-NEXT: flw ft1, %lo(.LCPI7_1)(a0) ; CHECK-NEXT: vfadd.vf v10, v9, ft0 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0 -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft1 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 @@ -201,7 +201,7 @@ ; CHECK-NEXT: flw ft1, %lo(.LCPI8_1)(a0) ; CHECK-NEXT: vfadd.vf v12, v10, ft0 ; CHECK-NEXT: vmerge.vvm v10, v10, v12, v0 -; CHECK-NEXT: vfsgnjx.vv v12, v8, v8 +; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft1 ; CHECK-NEXT: vfsgnj.vv v10, v10, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 @@ -224,7 +224,7 @@ ; CHECK-NEXT: flw ft1, %lo(.LCPI9_1)(a0) ; CHECK-NEXT: vfadd.vf v16, v12, ft0 ; CHECK-NEXT: vmerge.vvm v12, v12, v16, v0 -; CHECK-NEXT: vfsgnjx.vv v16, v8, v8 +; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft1 ; CHECK-NEXT: vfsgnj.vv v12, v12, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 @@ -247,7 +247,7 @@ ; CHECK-NEXT: flw ft1, %lo(.LCPI10_1)(a0) ; CHECK-NEXT: vfadd.vf v24, v16, ft0 ; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0 -; CHECK-NEXT: vfsgnjx.vv v24, v8, v8 +; CHECK-NEXT: vfabs.v v24, v8 ; CHECK-NEXT: vmflt.vf v0, v24, ft1 ; CHECK-NEXT: vfsgnj.vv v16, v16, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 @@ -270,7 +270,7 @@ ; CHECK-NEXT: fld ft1, %lo(.LCPI11_1)(a0) ; CHECK-NEXT: vfadd.vf v10, v9, ft0 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0 -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft1 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 @@ -293,7 +293,7 @@ ; CHECK-NEXT: fld ft1, %lo(.LCPI12_1)(a0) ; CHECK-NEXT: vfadd.vf v12, v10, ft0 ; CHECK-NEXT: vmerge.vvm v10, v10, v12, v0 -; CHECK-NEXT: vfsgnjx.vv v12, v8, v8 +; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft1 ; CHECK-NEXT: vfsgnj.vv v10, v10, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 @@ -316,7 +316,7 @@ ; CHECK-NEXT: fld ft1, %lo(.LCPI13_1)(a0) ; CHECK-NEXT: vfadd.vf v16, v12, ft0 ; CHECK-NEXT: vmerge.vvm v12, v12, v16, v0 -; CHECK-NEXT: vfsgnjx.vv v16, v8, v8 +; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft1 ; CHECK-NEXT: vfsgnj.vv v12, v12, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 @@ -339,7 +339,7 @@ ; CHECK-NEXT: fld ft1, %lo(.LCPI14_1)(a0) ; CHECK-NEXT: vfadd.vf v24, v16, ft0 ; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0 -; CHECK-NEXT: vfsgnjx.vv v24, v8, v8 +; CHECK-NEXT: vfabs.v v24, v8 ; CHECK-NEXT: vmflt.vf v0, v24, ft1 ; CHECK-NEXT: vfsgnj.vv v16, v16, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 diff --git a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll 
b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll @@ -17,7 +17,7 @@ ; CHECK-NEXT: flh ft1, %lo(.LCPI0_1)(a0) ; CHECK-NEXT: vfsub.vf v10, v9, ft0 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0 -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft1 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 @@ -40,7 +40,7 @@ ; CHECK-NEXT: flh ft1, %lo(.LCPI1_1)(a0) ; CHECK-NEXT: vfsub.vf v10, v9, ft0 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0 -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft1 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 @@ -63,7 +63,7 @@ ; CHECK-NEXT: flh ft1, %lo(.LCPI2_1)(a0) ; CHECK-NEXT: vfsub.vf v10, v9, ft0 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0 -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft1 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 @@ -86,7 +86,7 @@ ; CHECK-NEXT: flh ft1, %lo(.LCPI3_1)(a0) ; CHECK-NEXT: vfsub.vf v12, v10, ft0 ; CHECK-NEXT: vmerge.vvm v10, v10, v12, v0 -; CHECK-NEXT: vfsgnjx.vv v12, v8, v8 +; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft1 ; CHECK-NEXT: vfsgnj.vv v10, v10, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 @@ -109,7 +109,7 @@ ; CHECK-NEXT: flh ft1, %lo(.LCPI4_1)(a0) ; CHECK-NEXT: vfsub.vf v16, v12, ft0 ; CHECK-NEXT: vmerge.vvm v12, v12, v16, v0 -; CHECK-NEXT: vfsgnjx.vv v16, v8, v8 +; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft1 ; CHECK-NEXT: vfsgnj.vv v12, v12, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 @@ -132,7 +132,7 @@ ; CHECK-NEXT: flh ft1, %lo(.LCPI5_1)(a0) ; CHECK-NEXT: vfsub.vf v24, v16, ft0 ; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0 -; CHECK-NEXT: vfsgnjx.vv v24, v8, v8 +; CHECK-NEXT: vfabs.v v24, v8 ; CHECK-NEXT: vmflt.vf v0, v24, ft1 ; CHECK-NEXT: vfsgnj.vv v16, v16, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 @@ -155,7 +155,7 @@ ; CHECK-NEXT: flw ft1, %lo(.LCPI6_1)(a0) ; CHECK-NEXT: vfsub.vf v10, v9, ft0 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0 -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft1 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 @@ -178,7 +178,7 @@ ; CHECK-NEXT: flw ft1, %lo(.LCPI7_1)(a0) ; CHECK-NEXT: vfsub.vf v10, v9, ft0 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0 -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft1 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 @@ -201,7 +201,7 @@ ; CHECK-NEXT: flw ft1, %lo(.LCPI8_1)(a0) ; CHECK-NEXT: vfsub.vf v12, v10, ft0 ; CHECK-NEXT: vmerge.vvm v10, v10, v12, v0 -; CHECK-NEXT: vfsgnjx.vv v12, v8, v8 +; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft1 ; CHECK-NEXT: vfsgnj.vv v10, v10, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 @@ -224,7 +224,7 @@ ; CHECK-NEXT: flw ft1, %lo(.LCPI9_1)(a0) ; CHECK-NEXT: vfsub.vf v16, v12, ft0 ; CHECK-NEXT: vmerge.vvm v12, v12, v16, v0 -; CHECK-NEXT: vfsgnjx.vv v16, v8, v8 +; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft1 ; CHECK-NEXT: vfsgnj.vv v12, v12, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 @@ -247,7 +247,7 @@ ; CHECK-NEXT: flw ft1, %lo(.LCPI10_1)(a0) ; CHECK-NEXT: vfsub.vf v24, v16, ft0 ; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0 -; CHECK-NEXT: vfsgnjx.vv v24, v8, v8 +; CHECK-NEXT: vfabs.v 
v24, v8 ; CHECK-NEXT: vmflt.vf v0, v24, ft1 ; CHECK-NEXT: vfsgnj.vv v16, v16, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 @@ -270,7 +270,7 @@ ; CHECK-NEXT: fld ft1, %lo(.LCPI11_1)(a0) ; CHECK-NEXT: vfsub.vf v10, v9, ft0 ; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0 -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft1 ; CHECK-NEXT: vfsgnj.vv v9, v9, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 @@ -293,7 +293,7 @@ ; CHECK-NEXT: fld ft1, %lo(.LCPI12_1)(a0) ; CHECK-NEXT: vfsub.vf v12, v10, ft0 ; CHECK-NEXT: vmerge.vvm v10, v10, v12, v0 -; CHECK-NEXT: vfsgnjx.vv v12, v8, v8 +; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft1 ; CHECK-NEXT: vfsgnj.vv v10, v10, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 @@ -316,7 +316,7 @@ ; CHECK-NEXT: fld ft1, %lo(.LCPI13_1)(a0) ; CHECK-NEXT: vfsub.vf v16, v12, ft0 ; CHECK-NEXT: vmerge.vvm v12, v12, v16, v0 -; CHECK-NEXT: vfsgnjx.vv v16, v8, v8 +; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft1 ; CHECK-NEXT: vfsgnj.vv v12, v12, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 @@ -339,7 +339,7 @@ ; CHECK-NEXT: fld ft1, %lo(.LCPI14_1)(a0) ; CHECK-NEXT: vfsub.vf v24, v16, ft0 ; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0 -; CHECK-NEXT: vfsgnjx.vv v24, v8, v8 +; CHECK-NEXT: vfabs.v v24, v8 ; CHECK-NEXT: vmflt.vf v0, v24, ft1 ; CHECK-NEXT: vfsgnj.vv v16, v16, v8 ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll @@ -21,7 +21,7 @@ ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: vsrl.vi v9, v8, 4 ; CHECK-NEXT: vor.vv v8, v8, v9 -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: vsrl.vi v9, v8, 1 ; CHECK-NEXT: li a1, 85 ; CHECK-NEXT: vand.vx v9, v9, a1 @@ -47,7 +47,7 @@ ; LMULMAX8-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; LMULMAX8-NEXT: vnsrl.wi v10, v12, 23 ; LMULMAX8-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; LMULMAX8-NEXT: vnsrl.wx v9, v10, zero +; LMULMAX8-NEXT: vncvt.x.x.w v9, v10 ; LMULMAX8-NEXT: li a1, 134 ; LMULMAX8-NEXT: vmseq.vi v0, v8, 0 ; LMULMAX8-NEXT: vrsub.vx v8, v9, a1 @@ -75,7 +75,7 @@ ; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9 ; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 8 ; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9 -; LMULMAX2-RV32I-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV32I-NEXT: vnot.v v8, v8 ; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV32I-NEXT: lui a1, 5 ; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365 @@ -110,7 +110,7 @@ ; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9 ; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 8 ; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9 -; LMULMAX2-RV64I-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV64I-NEXT: vnot.v v8, v8 ; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV64I-NEXT: lui a1, 5 ; LMULMAX2-RV64I-NEXT: addiw a1, a1, 1365 @@ -145,7 +145,7 @@ ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 8 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9 -; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV32-NEXT: vnot.v v8, v8 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV32-NEXT: lui a1, 5 ; LMULMAX1-RV32-NEXT: addi a1, a1, 1365 @@ -180,7 +180,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9 ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 8 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9 -; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV64-NEXT: vnot.v v8, v8 ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV64-NEXT: lui a1, 5 ; 
LMULMAX1-RV64-NEXT: addiw a1, a1, 1365 @@ -267,7 +267,7 @@ ; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9 ; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 16 ; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9 -; LMULMAX2-RV32I-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV32I-NEXT: vnot.v v8, v8 ; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV32I-NEXT: lui a1, 349525 ; LMULMAX2-RV32I-NEXT: addi a1, a1, 1365 @@ -305,7 +305,7 @@ ; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9 ; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 16 ; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9 -; LMULMAX2-RV64I-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV64I-NEXT: vnot.v v8, v8 ; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV64I-NEXT: lui a1, 349525 ; LMULMAX2-RV64I-NEXT: addiw a1, a1, 1365 @@ -343,7 +343,7 @@ ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 16 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9 -; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV32-NEXT: vnot.v v8, v8 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV32-NEXT: lui a1, 349525 ; LMULMAX1-RV32-NEXT: addi a1, a1, 1365 @@ -381,7 +381,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9 ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 16 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9 -; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV64-NEXT: vnot.v v8, v8 ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV64-NEXT: lui a1, 349525 ; LMULMAX1-RV64-NEXT: addiw a1, a1, 1365 @@ -532,7 +532,7 @@ ; LMULMAX2-RV64-NEXT: li a1, 32 ; LMULMAX2-RV64-NEXT: vsrl.vx v9, v8, a1 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v9 -; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV64-NEXT: vnot.v v8, v8 ; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI3_0) ; LMULMAX2-RV64-NEXT: ld a1, %lo(.LCPI3_0)(a1) ; LMULMAX2-RV64-NEXT: lui a2, %hi(.LCPI3_1) @@ -631,7 +631,7 @@ ; LMULMAX1-RV64-NEXT: li a1, 32 ; LMULMAX1-RV64-NEXT: vsrl.vx v9, v8, a1 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9 -; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV64-NEXT: vnot.v v8, v8 ; LMULMAX1-RV64-NEXT: lui a1, %hi(.LCPI3_0) ; LMULMAX1-RV64-NEXT: ld a1, %lo(.LCPI3_0)(a1) ; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI3_1) @@ -730,7 +730,7 @@ ; LMULMAX8-RV64-NEXT: li a1, 32 ; LMULMAX8-RV64-NEXT: vsrl.vx v9, v8, a1 ; LMULMAX8-RV64-NEXT: vor.vv v8, v8, v9 -; LMULMAX8-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX8-RV64-NEXT: vnot.v v8, v8 ; LMULMAX8-RV64-NEXT: lui a1, %hi(.LCPI3_0) ; LMULMAX8-RV64-NEXT: ld a1, %lo(.LCPI3_0)(a1) ; LMULMAX8-RV64-NEXT: lui a2, %hi(.LCPI3_1) @@ -774,7 +774,7 @@ ; LMULMAX2-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-NEXT: vsrl.vi v10, v8, 4 ; LMULMAX2-NEXT: vor.vv v8, v8, v10 -; LMULMAX2-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-NEXT: vnot.v v8, v8 ; LMULMAX2-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-NEXT: li a1, 85 ; LMULMAX2-NEXT: vand.vx v10, v10, a1 @@ -802,7 +802,7 @@ ; LMULMAX1-NEXT: vor.vv v8, v8, v10 ; LMULMAX1-NEXT: vsrl.vi v10, v8, 4 ; LMULMAX1-NEXT: vor.vv v8, v8, v10 -; LMULMAX1-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-NEXT: vnot.v v8, v8 ; LMULMAX1-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX1-NEXT: li a2, 85 ; LMULMAX1-NEXT: vand.vx v10, v10, a2 @@ -821,7 +821,7 @@ ; LMULMAX1-NEXT: vor.vv v9, v9, v10 ; LMULMAX1-NEXT: vsrl.vi v10, v9, 4 ; LMULMAX1-NEXT: vor.vv v9, v9, v10 -; LMULMAX1-NEXT: vxor.vi v9, v9, -1 +; LMULMAX1-NEXT: vnot.v v9, v9 ; LMULMAX1-NEXT: vsrl.vi v10, v9, 1 ; LMULMAX1-NEXT: vand.vx v10, v10, a2 ; LMULMAX1-NEXT: vsub.vv v9, v9, v10 @@ -847,7 +847,7 @@ ; LMULMAX8-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; LMULMAX8-NEXT: vnsrl.wi v12, v16, 23 ; LMULMAX8-NEXT: vsetvli zero, zero, e8, m2, ta, mu -; LMULMAX8-NEXT: vnsrl.wx v10, v12, 
zero +; LMULMAX8-NEXT: vncvt.x.x.w v10, v12 ; LMULMAX8-NEXT: li a1, 134 ; LMULMAX8-NEXT: vmseq.vi v0, v8, 0 ; LMULMAX8-NEXT: vrsub.vx v8, v10, a1 @@ -875,7 +875,7 @@ ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 8 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10 -; LMULMAX2-RV32-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV32-NEXT: vnot.v v8, v8 ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV32-NEXT: lui a1, 5 ; LMULMAX2-RV32-NEXT: addi a1, a1, 1365 @@ -910,7 +910,7 @@ ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 8 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10 -; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV64-NEXT: vnot.v v8, v8 ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV64-NEXT: lui a1, 5 ; LMULMAX2-RV64-NEXT: addiw a1, a1, 1365 @@ -947,7 +947,7 @@ ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 8 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10 -; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV32-NEXT: vnot.v v8, v8 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX1-RV32-NEXT: lui a2, 5 ; LMULMAX1-RV32-NEXT: addi a2, a2, 1365 @@ -975,7 +975,7 @@ ; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 8 ; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10 -; LMULMAX1-RV32-NEXT: vxor.vi v9, v9, -1 +; LMULMAX1-RV32-NEXT: vnot.v v9, v9 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 1 ; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a2 ; LMULMAX1-RV32-NEXT: vsub.vv v9, v9, v10 @@ -1006,7 +1006,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 8 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10 -; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV64-NEXT: vnot.v v8, v8 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX1-RV64-NEXT: lui a2, 5 ; LMULMAX1-RV64-NEXT: addiw a2, a2, 1365 @@ -1034,7 +1034,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 8 ; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10 -; LMULMAX1-RV64-NEXT: vxor.vi v9, v9, -1 +; LMULMAX1-RV64-NEXT: vnot.v v9, v9 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1 ; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a2 ; LMULMAX1-RV64-NEXT: vsub.vv v9, v9, v10 @@ -1087,7 +1087,7 @@ ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 16 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10 -; LMULMAX2-RV32-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV32-NEXT: vnot.v v8, v8 ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV32-NEXT: lui a1, 349525 ; LMULMAX2-RV32-NEXT: addi a1, a1, 1365 @@ -1125,7 +1125,7 @@ ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 16 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10 -; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV64-NEXT: vnot.v v8, v8 ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV64-NEXT: lui a1, 349525 ; LMULMAX2-RV64-NEXT: addiw a1, a1, 1365 @@ -1165,7 +1165,7 @@ ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 16 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10 -; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV32-NEXT: vnot.v v8, v8 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX1-RV32-NEXT: lui a2, 349525 ; LMULMAX1-RV32-NEXT: addi a2, a2, 1365 @@ -1196,7 +1196,7 @@ ; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 16 ; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10 -; LMULMAX1-RV32-NEXT: vxor.vi v9, v9, -1 +; LMULMAX1-RV32-NEXT: vnot.v v9, v9 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 1 ; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a2 ; LMULMAX1-RV32-NEXT: 
vsub.vv v9, v9, v10 @@ -1229,7 +1229,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 16 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10 -; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV64-NEXT: vnot.v v8, v8 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX1-RV64-NEXT: lui a2, 349525 ; LMULMAX1-RV64-NEXT: addiw a2, a2, 1365 @@ -1260,7 +1260,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 16 ; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10 -; LMULMAX1-RV64-NEXT: vxor.vi v9, v9, -1 +; LMULMAX1-RV64-NEXT: vnot.v v9, v9 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1 ; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a2 ; LMULMAX1-RV64-NEXT: vsub.vv v9, v9, v10 @@ -1374,7 +1374,7 @@ ; LMULMAX2-RV64-NEXT: li a1, 32 ; LMULMAX2-RV64-NEXT: vsrl.vx v10, v8, a1 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10 -; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV64-NEXT: vnot.v v8, v8 ; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI7_0) ; LMULMAX2-RV64-NEXT: ld a1, %lo(.LCPI7_0)(a1) ; LMULMAX2-RV64-NEXT: lui a2, %hi(.LCPI7_1) @@ -1503,7 +1503,7 @@ ; LMULMAX1-RV64-NEXT: li a2, 32 ; LMULMAX1-RV64-NEXT: vsrl.vx v10, v8, a2 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10 -; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV64-NEXT: vnot.v v8, v8 ; LMULMAX1-RV64-NEXT: lui a3, %hi(.LCPI7_0) ; LMULMAX1-RV64-NEXT: ld a3, %lo(.LCPI7_0)(a3) ; LMULMAX1-RV64-NEXT: lui a4, %hi(.LCPI7_1) @@ -1537,7 +1537,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10 ; LMULMAX1-RV64-NEXT: vsrl.vx v10, v9, a2 ; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10 -; LMULMAX1-RV64-NEXT: vxor.vi v9, v9, -1 +; LMULMAX1-RV64-NEXT: vnot.v v9, v9 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1 ; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a3 ; LMULMAX1-RV64-NEXT: vsub.vv v9, v9, v10 @@ -1628,7 +1628,7 @@ ; LMULMAX8-RV64-NEXT: li a1, 32 ; LMULMAX8-RV64-NEXT: vsrl.vx v10, v8, a1 ; LMULMAX8-RV64-NEXT: vor.vv v8, v8, v10 -; LMULMAX8-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX8-RV64-NEXT: vnot.v v8, v8 ; LMULMAX8-RV64-NEXT: lui a1, %hi(.LCPI7_0) ; LMULMAX8-RV64-NEXT: ld a1, %lo(.LCPI7_0)(a1) ; LMULMAX8-RV64-NEXT: lui a2, %hi(.LCPI7_1) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll @@ -17,7 +17,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a1, 1 ; CHECK-NEXT: vsub.vx v9, v8, a1 -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: vsrl.vi v9, v8, 1 ; CHECK-NEXT: li a1, 85 @@ -46,7 +46,7 @@ ; LMULMAX8-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; LMULMAX8-NEXT: vnsrl.wi v10, v12, 23 ; LMULMAX8-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; LMULMAX8-NEXT: vnsrl.wx v9, v10, zero +; LMULMAX8-NEXT: vncvt.x.x.w v9, v10 ; LMULMAX8-NEXT: li a1, 127 ; LMULMAX8-NEXT: vmseq.vi v0, v8, 0 ; LMULMAX8-NEXT: vsub.vx v8, v9, a1 @@ -68,7 +68,7 @@ ; LMULMAX2-RV32I-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV32I-NEXT: li a1, 1 ; LMULMAX2-RV32I-NEXT: vsub.vx v9, v8, a1 -; LMULMAX2-RV32I-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV32I-NEXT: vnot.v v8, v8 ; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v9 ; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV32I-NEXT: lui a1, 5 @@ -98,7 +98,7 @@ ; LMULMAX2-RV64I-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV64I-NEXT: li a1, 1 ; LMULMAX2-RV64I-NEXT: vsub.vx v9, v8, a1 -; LMULMAX2-RV64I-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV64I-NEXT: vnot.v v8, v8 ; LMULMAX2-RV64I-NEXT: vand.vv v8, v8, v9 ; LMULMAX2-RV64I-NEXT: 
vsrl.vi v9, v8, 1 ; LMULMAX2-RV64I-NEXT: lui a1, 5 @@ -128,7 +128,7 @@ ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: li a1, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v9, v8, a1 -; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV32-NEXT: vnot.v v8, v8 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV32-NEXT: lui a1, 5 @@ -158,7 +158,7 @@ ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: li a1, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v9, v8, a1 -; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV64-NEXT: vnot.v v8, v8 ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v9 ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV64-NEXT: lui a1, 5 @@ -244,7 +244,7 @@ ; LMULMAX2-RV32I-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV32I-NEXT: li a1, 1 ; LMULMAX2-RV32I-NEXT: vsub.vx v9, v8, a1 -; LMULMAX2-RV32I-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV32I-NEXT: vnot.v v8, v8 ; LMULMAX2-RV32I-NEXT: vand.vv v8, v8, v9 ; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV32I-NEXT: lui a1, 349525 @@ -275,7 +275,7 @@ ; LMULMAX2-RV64I-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV64I-NEXT: li a1, 1 ; LMULMAX2-RV64I-NEXT: vsub.vx v9, v8, a1 -; LMULMAX2-RV64I-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV64I-NEXT: vnot.v v8, v8 ; LMULMAX2-RV64I-NEXT: vand.vv v8, v8, v9 ; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV64I-NEXT: lui a1, 349525 @@ -306,7 +306,7 @@ ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: li a1, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v9, v8, a1 -; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV32-NEXT: vnot.v v8, v8 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV32-NEXT: lui a1, 349525 @@ -337,7 +337,7 @@ ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: li a1, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v9, v8, a1 -; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV64-NEXT: vnot.v v8, v8 ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v9 ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV64-NEXT: lui a1, 349525 @@ -474,7 +474,7 @@ ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v9, v8, a1 -; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV64-NEXT: vnot.v v8, v8 ; LMULMAX2-RV64-NEXT: vand.vv v8, v8, v9 ; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI3_0) ; LMULMAX2-RV64-NEXT: ld a1, %lo(.LCPI3_0)(a1) @@ -553,7 +553,7 @@ ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: li a1, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v9, v8, a1 -; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV64-NEXT: vnot.v v8, v8 ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v9 ; LMULMAX1-RV64-NEXT: lui a1, %hi(.LCPI3_0) ; LMULMAX1-RV64-NEXT: ld a1, %lo(.LCPI3_0)(a1) @@ -632,7 +632,7 @@ ; LMULMAX8-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX8-RV64-NEXT: li a1, 1 ; LMULMAX8-RV64-NEXT: vsub.vx v9, v8, a1 -; LMULMAX8-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX8-RV64-NEXT: vnot.v v8, v8 ; LMULMAX8-RV64-NEXT: vand.vv v8, v8, v9 ; LMULMAX8-RV64-NEXT: lui a1, %hi(.LCPI3_0) ; LMULMAX8-RV64-NEXT: ld a1, %lo(.LCPI3_0)(a1) @@ -673,7 +673,7 @@ ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: li a1, 1 ; LMULMAX2-NEXT: vsub.vx v10, v8, a1 -; LMULMAX2-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-NEXT: vnot.v v8, v8 ; LMULMAX2-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-NEXT: li a1, 85 @@ -698,7 +698,7 @@ ; LMULMAX1-NEXT: vle8.v v9, (a0) ; LMULMAX1-NEXT: li a2, 1 ; LMULMAX1-NEXT: vsub.vx v10, v8, a2 -; LMULMAX1-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-NEXT: vnot.v v8, v8 ; LMULMAX1-NEXT: vand.vv v8, v8, 
v10 ; LMULMAX1-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX1-NEXT: li a3, 85 @@ -713,7 +713,7 @@ ; LMULMAX1-NEXT: vadd.vv v8, v8, v10 ; LMULMAX1-NEXT: vand.vi v8, v8, 15 ; LMULMAX1-NEXT: vsub.vx v10, v9, a2 -; LMULMAX1-NEXT: vxor.vi v9, v9, -1 +; LMULMAX1-NEXT: vnot.v v9, v9 ; LMULMAX1-NEXT: vand.vv v9, v9, v10 ; LMULMAX1-NEXT: vsrl.vi v10, v9, 1 ; LMULMAX1-NEXT: vand.vx v10, v10, a3 @@ -742,7 +742,7 @@ ; LMULMAX8-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; LMULMAX8-NEXT: vnsrl.wi v12, v16, 23 ; LMULMAX8-NEXT: vsetvli zero, zero, e8, m2, ta, mu -; LMULMAX8-NEXT: vnsrl.wx v10, v12, zero +; LMULMAX8-NEXT: vncvt.x.x.w v10, v12 ; LMULMAX8-NEXT: li a1, 127 ; LMULMAX8-NEXT: vmseq.vi v0, v8, 0 ; LMULMAX8-NEXT: vsub.vx v8, v10, a1 @@ -764,7 +764,7 @@ ; LMULMAX2-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV32-NEXT: li a1, 1 ; LMULMAX2-RV32-NEXT: vsub.vx v10, v8, a1 -; LMULMAX2-RV32-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV32-NEXT: vnot.v v8, v8 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV32-NEXT: lui a1, 5 @@ -794,7 +794,7 @@ ; LMULMAX2-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v10, v8, a1 -; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV64-NEXT: vnot.v v8, v8 ; LMULMAX2-RV64-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV64-NEXT: lui a1, 5 @@ -826,7 +826,7 @@ ; LMULMAX1-RV32-NEXT: vle16.v v9, (a0) ; LMULMAX1-RV32-NEXT: li a2, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v10, v8, a2 -; LMULMAX1-RV32-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV32-NEXT: vnot.v v8, v8 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX1-RV32-NEXT: lui a3, 5 @@ -848,7 +848,7 @@ ; LMULMAX1-RV32-NEXT: vmul.vx v8, v8, a6 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX1-RV32-NEXT: vsub.vx v10, v9, a2 -; LMULMAX1-RV32-NEXT: vxor.vi v9, v9, -1 +; LMULMAX1-RV32-NEXT: vnot.v v9, v9 ; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v10 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 1 ; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a3 @@ -874,7 +874,7 @@ ; LMULMAX1-RV64-NEXT: vle16.v v9, (a0) ; LMULMAX1-RV64-NEXT: li a2, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v10, v8, a2 -; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX1-RV64-NEXT: vnot.v v8, v8 ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v10 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX1-RV64-NEXT: lui a3, 5 @@ -896,7 +896,7 @@ ; LMULMAX1-RV64-NEXT: vmul.vx v8, v8, a6 ; LMULMAX1-RV64-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX1-RV64-NEXT: vsub.vx v10, v9, a2 -; LMULMAX1-RV64-NEXT: vxor.vi v9, v9, -1 +; LMULMAX1-RV64-NEXT: vnot.v v9, v9 ; LMULMAX1-RV64-NEXT: vand.vv v9, v9, v10 ; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1 ; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a3 @@ -944,7 +944,7 @@ ; LMULMAX2-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV32-NEXT: li a1, 1 ; LMULMAX2-RV32-NEXT: vsub.vx v10, v8, a1 -; LMULMAX2-RV32-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV32-NEXT: vnot.v v8, v8 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV32-NEXT: lui a1, 349525 @@ -975,7 +975,7 @@ ; LMULMAX2-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v10, v8, a1 -; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1 +; LMULMAX2-RV64-NEXT: vnot.v v8, v8 ; LMULMAX2-RV64-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV64-NEXT: lui a1, 349525 @@ -1008,7 +1008,7 @@ ; LMULMAX1-RV32-NEXT: vle32.v v9, (a0) ; LMULMAX1-RV32-NEXT: li a2, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v10, v8, a2 -; LMULMAX1-RV32-NEXT: vxor.vi v8, 
v8, -1
+; LMULMAX1-RV32-NEXT: vnot.v v8, v8
; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v10
; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 1
; LMULMAX1-RV32-NEXT: lui a3, 349525
@@ -1031,7 +1031,7 @@
; LMULMAX1-RV32-NEXT: vmul.vx v8, v8, a6
; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 24
; LMULMAX1-RV32-NEXT: vsub.vx v10, v9, a2
-; LMULMAX1-RV32-NEXT: vxor.vi v9, v9, -1
+; LMULMAX1-RV32-NEXT: vnot.v v9, v9
; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v10
; LMULMAX1-RV32-NEXT: vsrl.vi v10, v9, 1
; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a3
@@ -1057,7 +1057,7 @@
; LMULMAX1-RV64-NEXT: vle32.v v9, (a0)
; LMULMAX1-RV64-NEXT: li a2, 1
; LMULMAX1-RV64-NEXT: vsub.vx v10, v8, a2
-; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT: vnot.v v8, v8
; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v10
; LMULMAX1-RV64-NEXT: vsrl.vi v10, v8, 1
; LMULMAX1-RV64-NEXT: lui a3, 349525
@@ -1080,7 +1080,7 @@
; LMULMAX1-RV64-NEXT: vmul.vx v8, v8, a6
; LMULMAX1-RV64-NEXT: vsrl.vi v8, v8, 24
; LMULMAX1-RV64-NEXT: vsub.vx v10, v9, a2
-; LMULMAX1-RV64-NEXT: vxor.vi v9, v9, -1
+; LMULMAX1-RV64-NEXT: vnot.v v9, v9
; LMULMAX1-RV64-NEXT: vand.vv v9, v9, v10
; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1
; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a3
@@ -1176,7 +1176,7 @@
; LMULMAX2-RV64-NEXT: vle64.v v8, (a0)
; LMULMAX2-RV64-NEXT: li a1, 1
; LMULMAX2-RV64-NEXT: vsub.vx v10, v8, a1
-; LMULMAX2-RV64-NEXT: vxor.vi v8, v8, -1
+; LMULMAX2-RV64-NEXT: vnot.v v8, v8
; LMULMAX2-RV64-NEXT: vand.vv v8, v8, v10
; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI7_0)
; LMULMAX2-RV64-NEXT: ld a1, %lo(.LCPI7_0)(a1)
@@ -1275,7 +1275,7 @@
; LMULMAX1-RV64-NEXT: vle64.v v9, (a0)
; LMULMAX1-RV64-NEXT: li a2, 1
; LMULMAX1-RV64-NEXT: vsub.vx v10, v8, a2
-; LMULMAX1-RV64-NEXT: vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT: vnot.v v8, v8
; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v10
; LMULMAX1-RV64-NEXT: lui a3, %hi(.LCPI7_0)
; LMULMAX1-RV64-NEXT: ld a3, %lo(.LCPI7_0)(a3)
@@ -1299,7 +1299,7 @@
; LMULMAX1-RV64-NEXT: li a7, 56
; LMULMAX1-RV64-NEXT: vsrl.vx v8, v8, a7
; LMULMAX1-RV64-NEXT: vsub.vx v10, v9, a2
-; LMULMAX1-RV64-NEXT: vxor.vi v9, v9, -1
+; LMULMAX1-RV64-NEXT: vnot.v v9, v9
; LMULMAX1-RV64-NEXT: vand.vv v9, v9, v10
; LMULMAX1-RV64-NEXT: vsrl.vi v10, v9, 1
; LMULMAX1-RV64-NEXT: vand.vx v10, v10, a3
@@ -1370,7 +1370,7 @@
; LMULMAX8-RV64-NEXT: vle64.v v8, (a0)
; LMULMAX8-RV64-NEXT: li a1, 1
; LMULMAX8-RV64-NEXT: vsub.vx v10, v8, a1
-; LMULMAX8-RV64-NEXT: vxor.vi v8, v8, -1
+; LMULMAX8-RV64-NEXT: vnot.v v8, v8
; LMULMAX8-RV64-NEXT: vand.vv v8, v8, v10
; LMULMAX8-RV64-NEXT: lui a1, %hi(.LCPI7_0)
; LMULMAX8-RV64-NEXT: ld a1, %lo(.LCPI7_0)(a1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
@@ -573,7 +573,7 @@
; CHECK-LABEL: truncstore_v2i16_v2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <2 x i16> %x to <2 x i8>
@@ -637,7 +637,7 @@
; CHECK-LABEL: truncstore_v4i16_v4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <4 x i16> %x to <4 x i8>
@@ -723,7 +723,7 @@
; CHECK-LABEL: truncstore_v8i16_v8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <8 x i16> %x to <8 x i8>
@@ -847,8 +847,8 @@
; LMULMAX1-LABEL: truncstore_v16i16_v16i8:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 8
; LMULMAX1-NEXT: vse8.v v8, (a0)
@@ -857,7 +857,7 @@
; LMULMAX4-LABEL: truncstore_v16i16_v16i8:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v10, v8, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v10, v8
; LMULMAX4-NEXT: vse8.v v10, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <16 x i16> %x to <16 x i8>
@@ -1031,9 +1031,9 @@
; CHECK-LABEL: truncstore_v2i32_v2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <2 x i32> %x to <2 x i8>
@@ -1045,7 +1045,7 @@
; CHECK-LABEL: truncstore_v2i32_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <2 x i32> %x to <2 x i16>
@@ -1083,9 +1083,9 @@
; CHECK-LABEL: truncstore_v4i32_v4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <4 x i32> %x to <4 x i8>
@@ -1097,7 +1097,7 @@
; CHECK-LABEL: truncstore_v4i32_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <4 x i32> %x to <4 x i16>
@@ -1157,13 +1157,13 @@
; LMULMAX1-LABEL: truncstore_v8i32_v8i8:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vse8.v v8, (a0)
@@ -1172,9 +1172,9 @@
; LMULMAX4-LABEL: truncstore_v8i32_v8i8:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v10, v8, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v10, v8
; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v8, v10, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v8, v10
; LMULMAX4-NEXT: vse8.v v8, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <8 x i32> %x to <8 x i8>
@@ -1186,8 +1186,8 @@
; LMULMAX1-LABEL: truncstore_v8i32_v8i16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vse16.v v8, (a0)
@@ -1196,7 +1196,7 @@
; LMULMAX4-LABEL: truncstore_v8i32_v8i16:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v10, v8, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v10, v8
; LMULMAX4-NEXT: vse16.v v10, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <8 x i32> %x to <8 x i16>
@@ -1270,25 +1270,25 @@
; LMULMAX1-LABEL: truncstore_v16i32_v16i8:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v10, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v10
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 8
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 12
; LMULMAX1-NEXT: vse8.v v8, (a0)
@@ -1297,9 +1297,9 @@
; LMULMAX4-LABEL: truncstore_v16i32_v16i8:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v12, v8, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v12, v8
; LMULMAX4-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v8, v12, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v8, v12
; LMULMAX4-NEXT: vse8.v v8, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <16 x i32> %x to <16 x i8>
@@ -1311,13 +1311,13 @@
; LMULMAX1-LABEL: truncstore_v16i32_v16i16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero
-; LMULMAX1-NEXT: vnsrl.wx v10, v10, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
+; LMULMAX1-NEXT: vncvt.x.x.w v10, v10
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 4
; LMULMAX1-NEXT: addi a1, a0, 16
@@ -1328,7 +1328,7 @@
; LMULMAX4-LABEL: truncstore_v16i32_v16i16:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v12, v8, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v12, v8
; LMULMAX4-NEXT: vse16.v v12, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <16 x i32> %x to <16 x i16>
@@ -1436,11 +1436,11 @@
; CHECK-LABEL: truncstore_v2i64_v2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <2 x i64> %x to <2 x i8>
@@ -1452,9 +1452,9 @@
; CHECK-LABEL: truncstore_v2i64_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <2 x i64> %x to <2 x i16>
@@ -1466,7 +1466,7 @@
; CHECK-LABEL: truncstore_v2i64_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <2 x i64> %x to <2 x i32>
@@ -1478,17 +1478,17 @@
; LMULMAX1-LABEL: truncstore_v4i64_v4i8:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vse8.v v8, (a0)
@@ -1497,11 +1497,11 @@
; LMULMAX4-LABEL: truncstore_v4i64_v4i8:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v10, v8, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v10, v8
; LMULMAX4-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v8, v10, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v8, v10
; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v8, v8
; LMULMAX4-NEXT: vse8.v v8, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <4 x i64> %x to <4 x i8>
@@ -1513,13 +1513,13 @@
; LMULMAX1-LABEL: truncstore_v4i64_v4i16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vse16.v v8, (a0)
@@ -1528,9 +1528,9 @@
; LMULMAX4-LABEL: truncstore_v4i64_v4i16:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v10, v8, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v10, v8
; LMULMAX4-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v8, v10, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v8, v10
; LMULMAX4-NEXT: vse16.v v8, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <4 x i64> %x to <4 x i16>
@@ -1542,8 +1542,8 @@
; LMULMAX1-LABEL: truncstore_v4i64_v4i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vse32.v v8, (a0)
@@ -1552,7 +1552,7 @@
; LMULMAX4-LABEL: truncstore_v4i64_v4i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v10, v8, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v10, v8
; LMULMAX4-NEXT: vse32.v v10, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <4 x i64> %x to <4 x i32>
@@ -1564,33 +1564,33 @@
; LMULMAX1-LABEL: truncstore_v8i64_v8i8:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v10, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v10
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
; LMULMAX1-NEXT: vse8.v v8, (a0)
@@ -1599,11 +1599,11 @@
; LMULMAX4-LABEL: truncstore_v8i64_v8i8:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v12, v8, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v12, v8
; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v8, v12, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v8, v12
; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v8, v8
; LMULMAX4-NEXT: vse8.v v8, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <8 x i64> %x to <8 x i8>
@@ -1615,25 +1615,25 @@
; LMULMAX1-LABEL: truncstore_v8i64_v8i16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v10, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v10
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
; LMULMAX1-NEXT: vse16.v v8, (a0)
@@ -1642,9 +1642,9 @@
; LMULMAX4-LABEL: truncstore_v8i64_v8i16:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v12, v8, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v12, v8
; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v8, v12, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v8, v12
; LMULMAX4-NEXT: vse16.v v8, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <8 x i64> %x to <8 x i16>
@@ -1656,13 +1656,13 @@
; LMULMAX1-LABEL: truncstore_v8i64_v8i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero
-; LMULMAX1-NEXT: vnsrl.wx v10, v10, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
+; LMULMAX1-NEXT: vncvt.x.x.w v10, v10
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 2
; LMULMAX1-NEXT: addi a1, a0, 16
@@ -1673,7 +1673,7 @@
; LMULMAX4-LABEL: truncstore_v8i64_v8i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v12, v8, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v12, v8
; LMULMAX4-NEXT: vse32.v v12, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <8 x i64> %x to <8 x i32>
@@ -1685,65 +1685,65 @@
; LMULMAX1-LABEL: truncstore_v16i64_v16i8:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e8, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v10, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v10
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 6, e8, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v12, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v12
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 10, e8, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 8
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v13, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v13
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 10
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v14, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v14
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 14, e8, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 12
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v15, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v15
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 14
; LMULMAX1-NEXT: vse8.v v8, (a0)
@@ -1752,17 +1752,17 @@
; LMULMAX4-LABEL: truncstore_v16i64_v16i8:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v16, v12, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v16, v12
; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v12, v16, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v12, v16
; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v12, v12, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v12, v12
; LMULMAX4-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v14, v8, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v14, v8
; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v8, v14, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v8, v14
; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v8, v8
; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, tu, mu
; LMULMAX4-NEXT: vslideup.vi v8, v12, 8
; LMULMAX4-NEXT: vse8.v v8, (a0)
@@ -1776,47 +1776,47 @@
; LMULMAX1-LABEL: truncstore_v16i64_v16i16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v10, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v10
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v13, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v13
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v10, v12, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v10, v12
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v10, v10, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v10, v10
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v14, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v14
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v15, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v15
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 6
; LMULMAX1-NEXT: addi a1, a0, 16
@@ -1827,13 +1827,13 @@
; LMULMAX4-LABEL: truncstore_v16i64_v16i16:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v16, v12, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v16, v12
; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v12, v16, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v12, v16
; LMULMAX4-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v14, v8, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v14, v8
; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v8, v14, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v8, v14
; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, tu, mu
; LMULMAX4-NEXT: vslideup.vi v8, v12, 8
; LMULMAX4-NEXT: vse16.v v8, (a0)
@@ -1847,23 +1847,23 @@
; LMULMAX1-LABEL: truncstore_v16i64_v16i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero
-; LMULMAX1-NEXT: vnsrl.wx v10, v10, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
+; LMULMAX1-NEXT: vncvt.x.x.w v10, v10
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v13, zero
-; LMULMAX1-NEXT: vnsrl.wx v11, v12, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v13
+; LMULMAX1-NEXT: vncvt.x.x.w v11, v12
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v11, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v15, zero
-; LMULMAX1-NEXT: vnsrl.wx v12, v14, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v15
+; LMULMAX1-NEXT: vncvt.x.x.w v12, v14
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v12, v9, 2
; LMULMAX1-NEXT: addi a1, a0, 48
@@ -1878,8 +1878,8 @@
; LMULMAX4-LABEL: truncstore_v16i64_v16i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu
-; LMULMAX4-NEXT: vnsrl.wx v16, v12, zero
-; LMULMAX4-NEXT: vnsrl.wx v12, v8, zero
+; LMULMAX4-NEXT: vncvt.x.x.w v16, v12
+; LMULMAX4-NEXT: vncvt.x.x.w v12, v8
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, tu, mu
; LMULMAX4-NEXT: vslideup.vi v12, v16, 8
; LMULMAX4-NEXT: vse32.v v12, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -201,7 +201,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = load <8 x half>, <8 x half>* %x
@@ -215,7 +215,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = load <4 x float>, <4 x float>* %x
@@ -229,7 +229,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
%a = load <2 x double>, <2 x double>* %x
@@ -243,7 +243,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT: vfabs.v v8, v8
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = load <8 x half>, <8 x half>* %x
@@ -258,7 +258,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT: vfabs.v v8, v8
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = load <4 x float>, <4 x float>* %x
@@ -273,7 +273,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT: vfabs.v v8, v8
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
%a = load <2 x double>, <2 x double>* %x
@@ -1191,7 +1191,7 @@
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; LMULMAX2-NEXT: vle16.v v8, (a0)
-; LMULMAX2-NEXT: vfsgnjn.vv v8, v8, v8
+; LMULMAX2-NEXT: vfneg.v v8, v8
; LMULMAX2-NEXT: vse16.v v8, (a0)
; LMULMAX2-NEXT: ret
;
@@ -1201,8 +1201,8 @@
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vle16.v v8, (a1)
; LMULMAX1-NEXT: vle16.v v9, (a0)
-; LMULMAX1-NEXT: vfsgnjn.vv v8, v8, v8
-; LMULMAX1-NEXT: vfsgnjn.vv v9, v9, v9
+; LMULMAX1-NEXT: vfneg.v v8, v8
+; LMULMAX1-NEXT: vfneg.v v9, v9
; LMULMAX1-NEXT: vse16.v v9, (a0)
; LMULMAX1-NEXT: vse16.v v8, (a1)
; LMULMAX1-NEXT: ret
@@ -1217,7 +1217,7 @@
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-NEXT: vle32.v v8, (a0)
-; LMULMAX2-NEXT: vfsgnjn.vv v8, v8, v8
+; LMULMAX2-NEXT: vfneg.v v8, v8
; LMULMAX2-NEXT: vse32.v v8, (a0)
; LMULMAX2-NEXT: ret
;
@@ -1227,8 +1227,8 @@
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vle32.v v8, (a1)
; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: vfsgnjn.vv v8, v8, v8
-; LMULMAX1-NEXT: vfsgnjn.vv v9, v9, v9
+; LMULMAX1-NEXT: vfneg.v v8, v8
+; LMULMAX1-NEXT: vfneg.v v9, v9
; LMULMAX1-NEXT: vse32.v v9, (a0)
; LMULMAX1-NEXT: vse32.v v8, (a1)
; LMULMAX1-NEXT: ret
@@ -1243,7 +1243,7 @@
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; LMULMAX2-NEXT: vle64.v v8, (a0)
-; LMULMAX2-NEXT: vfsgnjn.vv v8, v8, v8
+; LMULMAX2-NEXT: vfneg.v v8, v8
; LMULMAX2-NEXT: vse64.v v8, (a0)
; LMULMAX2-NEXT: ret
;
@@ -1253,8 +1253,8 @@
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vle64.v v8, (a1)
; LMULMAX1-NEXT: vle64.v v9, (a0)
-; LMULMAX1-NEXT: vfsgnjn.vv v8, v8, v8
-; LMULMAX1-NEXT: vfsgnjn.vv v9, v9, v9
+; LMULMAX1-NEXT: vfneg.v v8, v8
+; LMULMAX1-NEXT: vfneg.v v9, v9
; LMULMAX1-NEXT: vse64.v v9, (a0)
; LMULMAX1-NEXT: vse64.v v8, (a1)
; LMULMAX1-NEXT: ret
@@ -1971,7 +1971,7 @@
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: lui a1, %hi(.LCPI91_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI91_0)(a1)
-; CHECK-NEXT: vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8
; CHECK-NEXT: vfcvt.f.x.v v9, v9
@@ -1993,7 +1993,7 @@
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: lui a1, %hi(.LCPI92_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI92_0)(a1)
-; CHECK-NEXT: vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8
; CHECK-NEXT: vfcvt.f.x.v v9, v9
@@ -2015,7 +2015,7 @@
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: lui a1, %hi(.LCPI93_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI93_0)(a1)
-; CHECK-NEXT: vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8
; CHECK-NEXT: vfcvt.f.x.v v9, v9
@@ -2044,7 +2044,7 @@
; CHECK-NEXT: flh ft1, %lo(.LCPI94_1)(a1)
; CHECK-NEXT: vfadd.vf v10, v9, ft0
; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
; CHECK-NEXT: vmflt.vf v0, v10, ft1
; CHECK-NEXT: vfsgnj.vv v9, v9, v8
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -2071,7 +2071,7 @@
; CHECK-NEXT: flw ft1, %lo(.LCPI95_1)(a1)
; CHECK-NEXT: vfadd.vf v10, v9, ft0
; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
; CHECK-NEXT: vmflt.vf v0, v10, ft1
; CHECK-NEXT: vfsgnj.vv v9, v9, v8
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -2098,7 +2098,7 @@
; CHECK-NEXT: fld ft1, %lo(.LCPI96_1)(a1)
; CHECK-NEXT: vfadd.vf v10, v9, ft0
; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
; CHECK-NEXT: vmflt.vf v0, v10, ft1
; CHECK-NEXT: vfsgnj.vv v9, v9, v8
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -2125,7 +2125,7 @@
; CHECK-NEXT: flh ft1, %lo(.LCPI97_1)(a1)
; CHECK-NEXT: vfsub.vf v10, v9, ft0
; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
; CHECK-NEXT: vmflt.vf v0, v10, ft1
; CHECK-NEXT: vfsgnj.vv v9, v9, v8
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -2152,7 +2152,7 @@
; CHECK-NEXT: flw ft1, %lo(.LCPI98_1)(a1)
; CHECK-NEXT: vfsub.vf v10, v9, ft0
; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
; CHECK-NEXT: vmflt.vf v0, v10, ft1
; CHECK-NEXT: vfsgnj.vv v9, v9, v8
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -2179,7 +2179,7 @@
; CHECK-NEXT: fld ft1, %lo(.LCPI99_1)(a1)
; CHECK-NEXT: vfsub.vf v10, v9, ft0
; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
; CHECK-NEXT: vmflt.vf v0, v10, ft1
; CHECK-NEXT: vfsgnj.vv v9, v9, v8
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -2201,7 +2201,7 @@
; CHECK-NEXT: flh ft0, %lo(.LCPI100_0)(a1)
; CHECK-NEXT: lui a1, %hi(.LCPI100_1)
; CHECK-NEXT: flh ft1, %lo(.LCPI100_1)(a1)
-; CHECK-NEXT: vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfadd.vf v9, v9, ft1
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9
@@ -2226,7 +2226,7 @@
; CHECK-NEXT: flw ft0, %lo(.LCPI101_0)(a1)
; CHECK-NEXT: lui a1, %hi(.LCPI101_1)
; CHECK-NEXT: flw ft1, %lo(.LCPI101_1)(a1)
-; CHECK-NEXT: vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfadd.vf v9, v9, ft1
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9
@@ -2251,7 +2251,7 @@
; CHECK-NEXT: fld ft0, %lo(.LCPI102_0)(a1)
; CHECK-NEXT: lui a1, %hi(.LCPI102_1)
; CHECK-NEXT: fld ft1, %lo(.LCPI102_1)(a1)
-; CHECK-NEXT: vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfadd.vf v9, v9, ft1
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
@@ -348,9 +348,9 @@
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v9, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v9
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vse8.v v8, (a1)
; CHECK-NEXT: ret
%a = load <2 x double>, <2 x double>* %x
@@ -367,9 +367,9 @@
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v9, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v9
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vse8.v v8, (a1)
; CHECK-NEXT: ret
%a = load <2 x double>, <2 x double>* %x
@@ -410,9 +410,9 @@
; LMULMAX8-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; LMULMAX8-NEXT: vfncvt.rtz.x.f.w v12, v8
; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX8-NEXT: vnsrl.wx v8, v12, zero
+; LMULMAX8-NEXT: vncvt.x.x.w v8, v12
; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX8-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX8-NEXT: vncvt.x.x.w v8, v8
; LMULMAX8-NEXT: vse8.v v8, (a1)
; LMULMAX8-NEXT: ret
;
@@ -429,31 +429,31 @@
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v12, v10
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v10, v12, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v10, v12
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v10, v10, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v10, v10
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v12, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v11, v12, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v11, v12
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v11, v11, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v11, v11
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v11, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v11, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v9, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v8, 6
; LMULMAX1-NEXT: vse8.v v10, (a1)
@@ -472,9 +472,9 @@
; LMULMAX8-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; LMULMAX8-NEXT: vfncvt.rtz.xu.f.w v12, v8
; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX8-NEXT: vnsrl.wx v8, v12, zero
+; LMULMAX8-NEXT: vncvt.x.x.w v8, v12
; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX8-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX8-NEXT: vncvt.x.x.w v8, v8
; LMULMAX8-NEXT: vse8.v v8, (a1)
; LMULMAX8-NEXT: ret
;
@@ -491,31 +491,31 @@
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v12, v10
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v10, v12, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v10, v12
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v10, v10, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v10, v10
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v12, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v11, v12, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v11, v12
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v11, v11, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v11, v11
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v11, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v11, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v9, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v8, 6
; LMULMAX1-NEXT: vse8.v v10, (a1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
@@ -169,9 +169,9 @@
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vse8.v v8, (a1)
; CHECK-NEXT: ret
%a = load <4 x i32>, <4 x i32>* %x
@@ -186,9 +186,9 @@
; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX8-NEXT: vle32.v v8, (a0)
; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX8-NEXT: vnsrl.wx v10, v8, zero
+; LMULMAX8-NEXT: vncvt.x.x.w v10, v8
; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX8-NEXT: vnsrl.wx v8, v10, zero
+; LMULMAX8-NEXT: vncvt.x.x.w v8, v10
; LMULMAX8-NEXT: vse8.v v8, (a1)
; LMULMAX8-NEXT: ret
;
@@ -197,9 +197,9 @@
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-NEXT: vle32.v v8, (a0)
; LMULMAX2-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX2-NEXT: vnsrl.wx v10, v8, zero
+; LMULMAX2-NEXT: vncvt.x.x.w v10, v8
; LMULMAX2-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX2-NEXT: vnsrl.wx v8, v10, zero
+; LMULMAX2-NEXT: vncvt.x.x.w v8, v10
; LMULMAX2-NEXT: vse8.v v8, (a1)
; LMULMAX2-NEXT: ret
;
@@ -210,13 +210,13 @@
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vle32.v v9, (a0)
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vse8.v v8, (a1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -6493,7 +6493,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%a = load <16 x i8>, <16 x i8>* %x
@@ -6509,7 +6509,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = load <8 x i16>, <8 x i16>* %x
@@ -6525,7 +6525,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = load <4 x i32>, <4 x i32>* %x
@@ -6541,7 +6541,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
%a = load <2 x i64>, <2 x i64>* %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -1032,7 +1032,7 @@
; RV32-NEXT: vsext.vf8 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1059,7 +1059,7 @@
; RV32-NEXT: vzext.vf8 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1110,7 +1110,7 @@
; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1137,7 +1137,7 @@
; RV32-NEXT: vzext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1187,7 +1187,7 @@
; RV32-NEXT: vsext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1214,7 +1214,7 @@
; RV32-NEXT: vzext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1240,7 +1240,7 @@
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1897,7 +1897,7 @@
; RV32-NEXT: vsext.vf8 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1924,7 +1924,7 @@
; RV32-NEXT: vzext.vf8 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1975,7 +1975,7 @@
; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -2002,7 +2002,7 @@
; RV32-NEXT: vzext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -2052,7 +2052,7 @@
; RV32-NEXT: vsext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -2079,7 +2079,7 @@
; RV32-NEXT: vzext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -2105,7 +2105,7 @@
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -44,14 +44,14 @@
; RV32-LABEL: mscatter_v2i16_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_v2i16_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
%tval = trunc <2 x i16> %val to <2 x i8>
@@ -63,18 +63,18 @@
; RV32-LABEL: mscatter_v2i32_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_v2i32_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
%tval = trunc <2 x i32> %val to <2 x i8>
@@ -86,22 +86,22 @@
; RV32-LABEL: mscatter_v2i64_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_v2i64_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
%tval = trunc <2 x i64> %val to <2 x i8>
@@ -232,14 +232,14 @@
; RV32-LABEL: mscatter_v2i32_truncstore_v2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_v2i32_truncstore_v2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
%tval = trunc <2 x i32> %val to <2 x i16>
@@ -251,18 +251,18 @@
; RV32-LABEL: mscatter_v2i64_truncstore_v2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_v2i64_truncstore_v2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
%tval = trunc <2 x i64> %val to <2 x i16>
@@ -466,14 +466,14 @@
; RV32-LABEL: mscatter_v2i64_truncstore_v2i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_v2i64_truncstore_v2i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
%tval = trunc <2 x i64> %val to <2 x i32>
@@ -827,7 +827,7 @@
; RV32-NEXT: vsext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -852,7 +852,7 @@
; RV32-NEXT: vzext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -899,7 +899,7 @@
; RV32-NEXT: vsext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -924,7 +924,7 @@
; RV32-NEXT: vzext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -970,7 +970,7 @@
; RV32-NEXT: vsext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -995,7 +995,7 @@
; RV32-NEXT: vzext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1019,7 +1019,7 @@
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v12, v12, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1609,7 +1609,7 @@
; RV32-NEXT: vsext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1634,7 +1634,7 @@
; RV32-NEXT: vzext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1681,7 +1681,7 @@
; RV32-NEXT: vsext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1706,7 +1706,7 @@
; RV32-NEXT: vzext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1752,7 +1752,7 @@
; RV32-NEXT: vsext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1777,7 +1777,7 @@
; RV32-NEXT: vzext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1801,7 +1801,7 @@
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v12, v12, 3
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
@@ -20,7 +20,7 @@
; CHECK-LABEL: vfneg_vv_v2f16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
@@ -44,7 +44,7 @@
; CHECK-LABEL: vfneg_vv_v4f16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
@@ -68,7 +68,7 @@
; CHECK-LABEL: vfneg_vv_v8f16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
@@ -92,7 +92,7 @@
; CHECK-LABEL: vfneg_vv_v16f16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
@@ -116,7 +116,7 @@
; CHECK-LABEL: vfneg_vv_v2f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
@@ -140,7 +140,7 @@
; CHECK-LABEL: vfneg_vv_v4f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
@@ -164,7 +164,7 @@
; CHECK-LABEL: vfneg_vv_v8f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
@@ -188,7 +188,7 @@
; CHECK-LABEL: vfneg_vv_v16f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
@@ -212,7 +212,7 @@
; CHECK-LABEL: vfneg_vv_v2f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
@@ -236,7 +236,7 @@
; CHECK-LABEL: vfneg_vv_v4f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
@@ -260,7 +260,7 @@
; CHECK-LABEL: vfneg_vv_v8f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
@@ -284,7 +284,7 @@
; CHECK-LABEL: vfneg_vv_v15f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <15 x i1> poison, i1 true, i32 0
%m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
@@ -308,7 +308,7 @@
; CHECK-LABEL: vfneg_vv_v16f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
@@ -356,13 +356,13 @@
; CHECK-NEXT: .LBB27_2:
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: vfsgnjn.vv v16, v16, v16
+; CHECK-NEXT: vfneg.v v16, v16
; CHECK-NEXT: bltu a0, a1, .LBB27_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB27_4:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <32 x i1> poison, i1 true, i32 0
%m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -965,7 +965,7 @@
; RV32-NEXT: vsext.vf8 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -991,7 +991,7 @@
; RV32-NEXT: vzext.vf8 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1040,7 +1040,7 @@
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1066,7 +1066,7 @@
; RV32-NEXT: vzext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1114,7 +1114,7 @@
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1140,7 +1140,7 @@
; RV32-NEXT: vzext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1165,7 +1165,7 @@
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1702,7 +1702,7 @@
; RV32-NEXT: vsext.vf8 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1728,7 +1728,7 @@
; RV32-NEXT: vzext.vf8 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1777,7 +1777,7 @@
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1803,7 +1803,7 @@
; RV32-NEXT: vzext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1851,7 +1851,7 @@
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1877,7 +1877,7 @@
; RV32-NEXT: vzext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1902,7 +1902,7 @@
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -2055,7 +2055,7 @@
; RV32-NEXT: vsext.vf8 v24, v8
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v16, zero
+; RV32-NEXT: vncvt.x.x.w v12, v16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v10, 2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
@@ -2068,7 +2068,7 @@
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v24, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v4, v24, zero
+; RV32-NEXT: vncvt.x.x.w v4, v24
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v10
; RV32-NEXT: vluxei32.v v8, (a0), v4, v0.t
@@ -2128,7 +2128,7 @@
; RV32-NEXT: vzext.vf8 v24, v8
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v16, zero
+; RV32-NEXT: vncvt.x.x.w v12, v16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v10, 2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
@@ -2141,7 +2141,7 @@
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v24, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v4, v24, zero
+; RV32-NEXT: vncvt.x.x.w v4, v24
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v10
; RV32-NEXT: vluxei32.v v8, (a0), v4, v0.t
@@ -2265,7 +2265,7 @@
; RV32-NEXT: vsext.vf4 v24, v8
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v16, zero
+; RV32-NEXT: vncvt.x.x.w v8, v16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v12, 2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
@@ -2278,7 +2278,7 @@
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v24, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v4, v24, zero
+; RV32-NEXT: vncvt.x.x.w v4, v24
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vluxei32.v v8, (a0), v4, v0.t
@@ -2338,7 +2338,7 @@
; RV32-NEXT: vzext.vf4 v24, v8
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v16, zero
+; RV32-NEXT: vncvt.x.x.w v8, v16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v12, 2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
@@ -2351,7 +2351,7 @@
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v24, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v4, v24, zero
+; RV32-NEXT: vncvt.x.x.w v4, v24
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vluxei32.v v8, (a0), v4, v0.t
@@ -2474,7 +2474,7 @@
; RV32-NEXT: vsext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v4, v8, zero
+; RV32-NEXT: vncvt.x.x.w v4, v8
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v1, 2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
@@ -2487,7 +2487,7 @@
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
;
RV32-NEXT: vmv1r.v v0, v1 ; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t @@ -2547,7 +2547,7 @@ ; RV32-NEXT: vzext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v4, v8, zero +; RV32-NEXT: vncvt.x.x.w v4, v8 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32-NEXT: vslidedown.vi v0, v1, 2 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu @@ -2560,7 +2560,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v1 ; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t @@ -2616,7 +2616,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v28, v16, zero +; RV32-NEXT: vncvt.x.x.w v28, v16 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32-NEXT: vslidedown.vi v0, v24, 2 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu @@ -2629,7 +2629,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v28, v8, zero +; RV32-NEXT: vncvt.x.x.w v28, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v24 ; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll @@ -26,7 +26,7 @@ ; RV32-LABEL: vpscatter_v2i16_truncstore_v2i8: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu -; RV32-NEXT: vnsrl.wx v8, v8, zero +; RV32-NEXT: vncvt.x.x.w v8, v8 ; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret @@ -34,7 +34,7 @@ ; RV64-LABEL: vpscatter_v2i16_truncstore_v2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu -; RV64-NEXT: vnsrl.wx v8, v8, zero +; RV64-NEXT: vncvt.x.x.w v8, v8 ; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret @@ -47,9 +47,9 @@ ; RV32-LABEL: vpscatter_v2i32_truncstore_v2i8: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu -; RV32-NEXT: vnsrl.wx v8, v8, zero +; RV32-NEXT: vncvt.x.x.w v8, v8 ; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; RV32-NEXT: vnsrl.wx v8, v8, zero +; RV32-NEXT: vncvt.x.x.w v8, v8 ; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret @@ -57,9 +57,9 @@ ; RV64-LABEL: vpscatter_v2i32_truncstore_v2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu -; RV64-NEXT: vnsrl.wx v8, v8, zero +; RV64-NEXT: vncvt.x.x.w v8, v8 ; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; RV64-NEXT: vnsrl.wx v8, v8, zero +; RV64-NEXT: vncvt.x.x.w v8, v8 ; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret @@ -72,11 +72,11 @@ ; RV32-LABEL: vpscatter_v2i64_truncstore_v2i8: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV32-NEXT: vnsrl.wx v8, v8, zero +; RV32-NEXT: vncvt.x.x.w v8, v8 ; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; RV32-NEXT: vnsrl.wx v8, v8, zero +; RV32-NEXT: vncvt.x.x.w v8, v8 ; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; RV32-NEXT: vnsrl.wx v8, v8, 
zero +; RV32-NEXT: vncvt.x.x.w v8, v8 ; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret @@ -84,11 +84,11 @@ ; RV64-LABEL: vpscatter_v2i64_truncstore_v2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV64-NEXT: vnsrl.wx v8, v8, zero +; RV64-NEXT: vncvt.x.x.w v8, v8 ; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; RV64-NEXT: vnsrl.wx v8, v8, zero +; RV64-NEXT: vncvt.x.x.w v8, v8 ; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; RV64-NEXT: vnsrl.wx v8, v8, zero +; RV64-NEXT: vncvt.x.x.w v8, v8 ; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret @@ -194,7 +194,7 @@ ; RV32-LABEL: vpscatter_v2i32_truncstore_v2i16: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu -; RV32-NEXT: vnsrl.wx v8, v8, zero +; RV32-NEXT: vncvt.x.x.w v8, v8 ; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret @@ -202,7 +202,7 @@ ; RV64-LABEL: vpscatter_v2i32_truncstore_v2i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu -; RV64-NEXT: vnsrl.wx v8, v8, zero +; RV64-NEXT: vncvt.x.x.w v8, v8 ; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret @@ -215,9 +215,9 @@ ; RV32-LABEL: vpscatter_v2i64_truncstore_v2i16: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV32-NEXT: vnsrl.wx v8, v8, zero +; RV32-NEXT: vncvt.x.x.w v8, v8 ; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; RV32-NEXT: vnsrl.wx v8, v8, zero +; RV32-NEXT: vncvt.x.x.w v8, v8 ; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret @@ -225,9 +225,9 @@ ; RV64-LABEL: vpscatter_v2i64_truncstore_v2i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV64-NEXT: vnsrl.wx v8, v8, zero +; RV64-NEXT: vncvt.x.x.w v8, v8 ; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; RV64-NEXT: vnsrl.wx v8, v8, zero +; RV64-NEXT: vncvt.x.x.w v8, v8 ; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret @@ -442,7 +442,7 @@ ; RV32-LABEL: vpscatter_v2i64_truncstore_v2i32: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV32-NEXT: vnsrl.wx v8, v8, zero +; RV32-NEXT: vncvt.x.x.w v8, v8 ; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret @@ -450,7 +450,7 @@ ; RV64-LABEL: vpscatter_v2i64_truncstore_v2i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV64-NEXT: vnsrl.wx v8, v8, zero +; RV64-NEXT: vncvt.x.x.w v8, v8 ; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret @@ -779,7 +779,7 @@ ; RV32-NEXT: vsext.vf8 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vnsrl.wx v16, v12, zero +; RV32-NEXT: vncvt.x.x.w v16, v12 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -805,7 +805,7 @@ ; RV32-NEXT: vzext.vf8 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vnsrl.wx v16, v12, zero +; RV32-NEXT: vncvt.x.x.w v16, v12 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -854,7 +854,7 @@ ; RV32-NEXT: vsext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, 
m2, ta, mu -; RV32-NEXT: vnsrl.wx v16, v12, zero +; RV32-NEXT: vncvt.x.x.w v16, v12 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -880,7 +880,7 @@ ; RV32-NEXT: vzext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vnsrl.wx v16, v12, zero +; RV32-NEXT: vncvt.x.x.w v16, v12 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -928,7 +928,7 @@ ; RV32-NEXT: vsext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vnsrl.wx v16, v12, zero +; RV32-NEXT: vncvt.x.x.w v16, v12 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -954,7 +954,7 @@ ; RV32-NEXT: vzext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vnsrl.wx v16, v12, zero +; RV32-NEXT: vncvt.x.x.w v16, v12 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -979,7 +979,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vsll.vi v12, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vnsrl.wx v16, v12, zero +; RV32-NEXT: vncvt.x.x.w v16, v12 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1500,7 +1500,7 @@ ; RV32-NEXT: vsext.vf8 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vnsrl.wx v16, v12, zero +; RV32-NEXT: vncvt.x.x.w v16, v12 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1526,7 +1526,7 @@ ; RV32-NEXT: vzext.vf8 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vnsrl.wx v16, v12, zero +; RV32-NEXT: vncvt.x.x.w v16, v12 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1575,7 +1575,7 @@ ; RV32-NEXT: vsext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vnsrl.wx v16, v12, zero +; RV32-NEXT: vncvt.x.x.w v16, v12 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1601,7 +1601,7 @@ ; RV32-NEXT: vzext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vnsrl.wx v16, v12, zero +; RV32-NEXT: vncvt.x.x.w v16, v12 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1649,7 +1649,7 @@ ; RV32-NEXT: vsext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vnsrl.wx v16, v12, zero +; RV32-NEXT: vncvt.x.x.w v16, v12 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1675,7 +1675,7 @@ ; RV32-NEXT: vzext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vnsrl.wx v16, v12, zero +; RV32-NEXT: vncvt.x.x.w v16, v12 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1700,7 +1700,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vsll.vi v12, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; RV32-NEXT: vnsrl.wx v16, v12, 
zero +; RV32-NEXT: vncvt.x.x.w v16, v12 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1924,7 +1924,7 @@ ; RV32-NEXT: vsext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: addi a1, a2, -16 ; RV32-NEXT: addi a4, sp, 16 @@ -1937,7 +1937,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu @@ -2058,7 +2058,7 @@ ; RV32-NEXT: vzext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: addi a1, a2, -16 ; RV32-NEXT: addi a4, sp, 16 @@ -2071,7 +2071,7 @@ ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll @@ -120,7 +120,7 @@ ; CHECK-LABEL: vxor_vi_v2i8_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer @@ -222,7 +222,7 @@ ; CHECK-LABEL: vxor_vi_v4i8_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer @@ -324,7 +324,7 @@ ; CHECK-LABEL: vxor_vi_v8i8_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer @@ -426,7 +426,7 @@ ; CHECK-LABEL: vxor_vi_v9i8_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <9 x i8> poison, i8 -1, i32 0 %vb = shufflevector <9 x i8> %elt.head, <9 x i8> poison, <9 x i32> zeroinitializer @@ -528,7 +528,7 @@ ; CHECK-LABEL: vxor_vi_v16i8_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer @@ -630,7 +630,7 @@ ; CHECK-LABEL: vxor_vi_v2i16_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, 
a0, e16, mf4, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer @@ -732,7 +732,7 @@ ; CHECK-LABEL: vxor_vi_v4i16_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer @@ -834,7 +834,7 @@ ; CHECK-LABEL: vxor_vi_v8i16_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer @@ -936,7 +936,7 @@ ; CHECK-LABEL: vxor_vi_v16i16_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer @@ -1038,7 +1038,7 @@ ; CHECK-LABEL: vxor_vi_v2i32_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer @@ -1140,7 +1140,7 @@ ; CHECK-LABEL: vxor_vi_v4i32_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer @@ -1242,7 +1242,7 @@ ; CHECK-LABEL: vxor_vi_v8i32_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer @@ -1344,7 +1344,7 @@ ; CHECK-LABEL: vxor_vi_v16i32_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer @@ -1474,7 +1474,7 @@ ; CHECK-LABEL: vxor_vi_v2i64_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer @@ -1604,7 +1604,7 @@ ; CHECK-LABEL: vxor_vi_v4i64_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer @@ -1734,7 +1734,7 @@ ; CHECK-LABEL: vxor_vi_v8i64_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, 
mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer @@ -1864,7 +1864,7 @@ ; CHECK-LABEL: vxor_vi_v16i64_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll @@ -12,7 +12,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI0_1) ; CHECK-NEXT: flh ft1, %lo(.LCPI0_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfadd.vf v9, v9, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9 @@ -33,7 +33,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI1_1) ; CHECK-NEXT: flh ft1, %lo(.LCPI1_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfadd.vf v9, v9, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9 @@ -54,7 +54,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI2_1) ; CHECK-NEXT: flh ft1, %lo(.LCPI2_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfadd.vf v9, v9, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9 @@ -75,7 +75,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI3_1) ; CHECK-NEXT: flh ft1, %lo(.LCPI3_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: vfadd.vf v10, v10, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v10 @@ -96,7 +96,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI4_1) ; CHECK-NEXT: flh ft1, %lo(.LCPI4_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v12, v8, v8 +; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: vfadd.vf v12, v12, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v12 @@ -117,7 +117,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI5_1) ; CHECK-NEXT: flh ft1, %lo(.LCPI5_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v16, v8, v8 +; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: vfadd.vf v16, v16, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16 @@ -138,7 +138,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI6_1) ; CHECK-NEXT: flw ft1, %lo(.LCPI6_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfadd.vf v9, v9, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9 @@ -159,7 +159,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI7_1) ; CHECK-NEXT: flw ft1, %lo(.LCPI7_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfadd.vf v9, v9, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9 @@ -180,7 +180,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI8_1) ; CHECK-NEXT: flw ft1, %lo(.LCPI8_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; 
CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: vfadd.vf v10, v10, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v10 @@ -201,7 +201,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI9_1) ; CHECK-NEXT: flw ft1, %lo(.LCPI9_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v12, v8, v8 +; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: vfadd.vf v12, v12, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v12 @@ -222,7 +222,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI10_1) ; CHECK-NEXT: flw ft1, %lo(.LCPI10_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v16, v8, v8 +; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: vfadd.vf v16, v16, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16 @@ -243,7 +243,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI11_1) ; CHECK-NEXT: fld ft1, %lo(.LCPI11_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfadd.vf v9, v9, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9 @@ -264,7 +264,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI12_1) ; CHECK-NEXT: fld ft1, %lo(.LCPI12_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: vfadd.vf v10, v10, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v10 @@ -285,7 +285,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI13_1) ; CHECK-NEXT: fld ft1, %lo(.LCPI13_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v12, v8, v8 +; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: vfadd.vf v12, v12, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v12 @@ -306,7 +306,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI14_1) ; CHECK-NEXT: fld ft1, %lo(.LCPI14_1)(a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v16, v8, v8 +; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: vfadd.vf v16, v16, ft1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll @@ -10,7 +10,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI0_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI0_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v9, v9 @@ -28,7 +28,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI1_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v9, v9 @@ -46,7 +46,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI2_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI2_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v9, v9 @@ -64,7 +64,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI3_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8 ; CHECK-NEXT: 
vfcvt.f.x.v v10, v10 @@ -82,7 +82,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI4_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI4_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v12, v8, v8 +; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8 ; CHECK-NEXT: vfcvt.f.x.v v12, v12 @@ -100,7 +100,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI5_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v16, v8, v8 +; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8 ; CHECK-NEXT: vfcvt.f.x.v v16, v16 @@ -118,7 +118,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI6_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v9, v9 @@ -136,7 +136,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI7_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI7_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v9, v9 @@ -154,7 +154,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI8_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8 ; CHECK-NEXT: vfcvt.f.x.v v10, v10 @@ -172,7 +172,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI9_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v12, v8, v8 +; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8 ; CHECK-NEXT: vfcvt.f.x.v v12, v12 @@ -190,7 +190,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI10_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v16, v8, v8 +; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8 ; CHECK-NEXT: vfcvt.f.x.v v16, v16 @@ -208,7 +208,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI11_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI11_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v9, v8, v8 +; CHECK-NEXT: vfabs.v v9, v8 ; CHECK-NEXT: vmflt.vf v0, v9, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v9, v9 @@ -226,7 +226,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI12_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI12_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v10, v8, v8 +; CHECK-NEXT: vfabs.v v10, v8 ; CHECK-NEXT: vmflt.vf v0, v10, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8 ; CHECK-NEXT: vfcvt.f.x.v v10, v10 @@ -244,7 +244,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI13_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI13_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v12, v8, v8 +; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8 ; CHECK-NEXT: vfcvt.f.x.v v12, v12 @@ -262,7 +262,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI14_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI14_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v16, v8, v8 +; CHECK-NEXT: vfabs.v v16, v8 ; CHECK-NEXT: vmflt.vf v0, v16, ft0 ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8 ; CHECK-NEXT: 
vfcvt.f.x.v v16, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll b/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll --- a/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll +++ b/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll @@ -6,9 +6,9 @@ ; CHECK-LABEL: trunc_nxv4i32_to_nxv4i5: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v10, v8, zero +; CHECK-NEXT: vncvt.x.x.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v10, zero +; CHECK-NEXT: vncvt.x.x.w v8, v10 ; CHECK-NEXT: ret %v = trunc %a to ret %v @@ -18,9 +18,9 @@ ; CHECK-LABEL: trunc_nxv1i32_to_nxv1i5: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: ret %v = trunc %a to ret %v diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll @@ -1038,7 +1038,7 @@ ; RV32-NEXT: vsext.vf8 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1065,7 +1065,7 @@ ; RV32-NEXT: vzext.vf8 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1116,7 +1116,7 @@ ; RV32-NEXT: vsext.vf4 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1143,7 +1143,7 @@ ; RV32-NEXT: vzext.vf4 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1193,7 +1193,7 @@ ; RV32-NEXT: vsext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1220,7 +1220,7 @@ ; RV32-NEXT: vzext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1246,7 +1246,7 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1969,7 +1969,7 @@ ; 
RV32-NEXT: vsext.vf8 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -1996,7 +1996,7 @@ ; RV32-NEXT: vzext.vf8 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -2047,7 +2047,7 @@ ; RV32-NEXT: vsext.vf4 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -2074,7 +2074,7 @@ ; RV32-NEXT: vzext.vf4 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -2124,7 +2124,7 @@ ; RV32-NEXT: vsext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -2151,7 +2151,7 @@ ; RV32-NEXT: vzext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 @@ -2177,7 +2177,7 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll @@ -44,14 +44,14 @@ ; RV32-LABEL: mscatter_nxv2i16_truncstore_nxv2i8: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu -; RV32-NEXT: vnsrl.wx v8, v8, zero +; RV32-NEXT: vncvt.x.x.w v8, v8 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i16_truncstore_nxv2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu -; RV64-NEXT: vnsrl.wx v8, v8, zero +; RV64-NEXT: vncvt.x.x.w v8, v8 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret %tval = trunc %val to @@ -63,18 +63,18 @@ ; RV32-LABEL: mscatter_nxv2i32_truncstore_nxv2i8: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu -; RV32-NEXT: vnsrl.wx v8, v8, zero +; RV32-NEXT: vncvt.x.x.w v8, v8 ; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; RV32-NEXT: vnsrl.wx v8, v8, zero +; RV32-NEXT: vncvt.x.x.w v8, v8 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu -; RV64-NEXT: 
vnsrl.wx v8, v8, zero +; RV64-NEXT: vncvt.x.x.w v8, v8 ; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; RV64-NEXT: vnsrl.wx v8, v8, zero +; RV64-NEXT: vncvt.x.x.w v8, v8 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret %tval = trunc %val to @@ -86,22 +86,22 @@ ; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i8: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; RV32-NEXT: vnsrl.wx v11, v8, zero +; RV32-NEXT: vncvt.x.x.w v11, v8 ; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; RV32-NEXT: vnsrl.wx v8, v11, zero +; RV32-NEXT: vncvt.x.x.w v8, v11 ; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; RV32-NEXT: vnsrl.wx v8, v8, zero +; RV32-NEXT: vncvt.x.x.w v8, v8 ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; RV64-NEXT: vnsrl.wx v12, v8, zero +; RV64-NEXT: vncvt.x.x.w v12, v8 ; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; RV64-NEXT: vnsrl.wx v8, v12, zero +; RV64-NEXT: vncvt.x.x.w v8, v12 ; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; RV64-NEXT: vnsrl.wx v8, v8, zero +; RV64-NEXT: vncvt.x.x.w v8, v8 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret %tval = trunc %val to @@ -232,14 +232,14 @@ ; RV32-LABEL: mscatter_nxv2i32_truncstore_nxv2i16: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu -; RV32-NEXT: vnsrl.wx v8, v8, zero +; RV32-NEXT: vncvt.x.x.w v8, v8 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu -; RV64-NEXT: vnsrl.wx v8, v8, zero +; RV64-NEXT: vncvt.x.x.w v8, v8 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret %tval = trunc %val to @@ -251,18 +251,18 @@ ; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i16: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; RV32-NEXT: vnsrl.wx v11, v8, zero +; RV32-NEXT: vncvt.x.x.w v11, v8 ; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; RV32-NEXT: vnsrl.wx v8, v11, zero +; RV32-NEXT: vncvt.x.x.w v8, v11 ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; RV64-NEXT: vnsrl.wx v12, v8, zero +; RV64-NEXT: vncvt.x.x.w v12, v8 ; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; RV64-NEXT: vnsrl.wx v8, v12, zero +; RV64-NEXT: vncvt.x.x.w v8, v12 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret %tval = trunc %val to @@ -466,14 +466,14 @@ ; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i32: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; RV32-NEXT: vnsrl.wx v11, v8, zero +; RV32-NEXT: vncvt.x.x.w v11, v8 ; RV32-NEXT: vsoxei32.v v11, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; RV64-NEXT: vnsrl.wx v12, v8, zero +; RV64-NEXT: vncvt.x.x.w v12, v8 ; RV64-NEXT: vsoxei64.v v12, (zero), v10, v0.t ; RV64-NEXT: ret %tval = trunc %val to @@ -827,7 +827,7 @@ ; RV32-NEXT: vsext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -852,7 +852,7 @@ ; RV32-NEXT: vzext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; 
RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -899,7 +899,7 @@ ; RV32-NEXT: vsext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -924,7 +924,7 @@ ; RV32-NEXT: vzext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -970,7 +970,7 @@ ; RV32-NEXT: vsext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -995,7 +995,7 @@ ; RV32-NEXT: vzext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1019,7 +1019,7 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1609,7 +1609,7 @@ ; RV32-NEXT: vsext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1634,7 +1634,7 @@ ; RV32-NEXT: vzext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1681,7 +1681,7 @@ ; RV32-NEXT: vsext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1706,7 +1706,7 @@ ; RV32-NEXT: vzext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1752,7 +1752,7 @@ ; RV32-NEXT: vsext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1777,7 +1777,7 @@ ; RV32-NEXT: vzext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e32, 
m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1801,7 +1801,7 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll @@ -10,7 +10,7 @@ ; CHECK-LABEL: vfabs_nxv1f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv1f16( %v) ret %r @@ -22,7 +22,7 @@ ; CHECK-LABEL: vfabs_nxv2f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv2f16( %v) ret %r @@ -34,7 +34,7 @@ ; CHECK-LABEL: vfabs_nxv4f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv4f16( %v) ret %r @@ -46,7 +46,7 @@ ; CHECK-LABEL: vfabs_nxv8f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv8f16( %v) ret %r @@ -58,7 +58,7 @@ ; CHECK-LABEL: vfabs_nxv16f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv16f16( %v) ret %r @@ -70,7 +70,7 @@ ; CHECK-LABEL: vfabs_nxv32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv32f16( %v) ret %r @@ -82,7 +82,7 @@ ; CHECK-LABEL: vfabs_nxv1f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv1f32( %v) ret %r @@ -94,7 +94,7 @@ ; CHECK-LABEL: vfabs_nxv2f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv2f32( %v) ret %r @@ -106,7 +106,7 @@ ; CHECK-LABEL: vfabs_nxv4f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv4f32( %v) ret %r @@ -118,7 +118,7 @@ ; CHECK-LABEL: vfabs_nxv8f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv8f32( %v) ret %r @@ -130,7 +130,7 @@ ; CHECK-LABEL: vfabs_nxv16f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv16f32( %v) ret %r @@ -142,7 +142,7 @@ ; CHECK-LABEL: vfabs_nxv1f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: 
vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv1f64( %v) ret %r @@ -154,7 +154,7 @@ ; CHECK-LABEL: vfabs_nxv2f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv2f64( %v) ret %r @@ -166,7 +166,7 @@ ; CHECK-LABEL: vfabs_nxv4f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv4f64( %v) ret %r @@ -178,7 +178,7 @@ ; CHECK-LABEL: vfabs_nxv8f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv8f64( %v) ret %r diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll @@ -8,7 +8,7 @@ ; CHECK-LABEL: vfneg_vv_nxv1f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -18,7 +18,7 @@ ; CHECK-LABEL: vfneg_vv_nxv2f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -28,7 +28,7 @@ ; CHECK-LABEL: vfneg_vv_nxv4f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -38,7 +38,7 @@ ; CHECK-LABEL: vfneg_vv_nxv8f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -48,7 +48,7 @@ ; CHECK-LABEL: vfneg_vv_nxv16f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -58,7 +58,7 @@ ; CHECK-LABEL: vfneg_vv_nxv32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -68,7 +68,7 @@ ; CHECK-LABEL: vfneg_vv_nxv1f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -78,7 +78,7 @@ ; CHECK-LABEL: vfneg_vv_nxv2f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -88,7 +88,7 @@ ; CHECK-LABEL: vfneg_vv_nxv4f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -98,7 +98,7 @@ ; CHECK-LABEL: vfneg_vv_nxv8f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -108,7 +108,7 @@ ; CHECK-LABEL: vfneg_vv_nxv16f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -118,7 +118,7 @@ ; CHECK-LABEL: vfneg_vv_nxv1f64: ; CHECK: # %bb.0: 
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -128,7 +128,7 @@ ; CHECK-LABEL: vfneg_vv_nxv2f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -138,7 +138,7 @@ ; CHECK-LABEL: vfneg_vv_nxv4f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -148,7 +148,7 @@ ; CHECK-LABEL: vfneg_vv_nxv8f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va ret %vb diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll @@ -20,7 +20,7 @@ ; CHECK-LABEL: vfneg_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer @@ -44,7 +44,7 @@ ; CHECK-LABEL: vfneg_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer @@ -68,7 +68,7 @@ ; CHECK-LABEL: vfneg_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer @@ -92,7 +92,7 @@ ; CHECK-LABEL: vfneg_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer @@ -116,7 +116,7 @@ ; CHECK-LABEL: vfneg_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer @@ -140,7 +140,7 @@ ; CHECK-LABEL: vfneg_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer @@ -164,7 +164,7 @@ ; CHECK-LABEL: vfneg_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer @@ -188,7 +188,7 @@ ; CHECK-LABEL: vfneg_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer @@ -212,7 +212,7 @@ ; CHECK-LABEL: vfneg_vv_nxv4f32_unmasked: ; 
CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer @@ -236,7 +236,7 @@ ; CHECK-LABEL: vfneg_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer @@ -260,7 +260,7 @@ ; CHECK-LABEL: vfneg_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer @@ -284,7 +284,7 @@ ; CHECK-LABEL: vfneg_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer @@ -308,7 +308,7 @@ ; CHECK-LABEL: vfneg_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer @@ -332,7 +332,7 @@ ; CHECK-LABEL: vfneg_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer @@ -356,7 +356,7 @@ ; CHECK-LABEL: vfneg_vv_nxv7f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer @@ -380,7 +380,7 @@ ; CHECK-LABEL: vfneg_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer @@ -431,13 +431,13 @@ ; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: sub a1, a0, a1 -; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: bltu a0, a1, .LBB33_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a3, a1 ; CHECK-NEXT: .LBB33_4: ; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vv v16, v16, v16 +; CHECK-NEXT: vfneg.v v16, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 %m = shufflevector %head, poison, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll @@ -636,7 +636,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v9, zero +; CHECK-NEXT: vncvt.x.x.w v8, v9 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -648,7 +648,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, 
zero, e8, mf8, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v9, zero +; CHECK-NEXT: vncvt.x.x.w v8, v9 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -748,7 +748,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v9, zero +; CHECK-NEXT: vncvt.x.x.w v8, v9 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -760,7 +760,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v9, zero +; CHECK-NEXT: vncvt.x.x.w v8, v9 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -860,7 +860,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v10, zero +; CHECK-NEXT: vncvt.x.x.w v8, v10 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -872,7 +872,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v10, zero +; CHECK-NEXT: vncvt.x.x.w v8, v10 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -972,7 +972,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v12, zero +; CHECK-NEXT: vncvt.x.x.w v8, v12 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -984,7 +984,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v12, zero +; CHECK-NEXT: vncvt.x.x.w v8, v12 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1084,7 +1084,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v16, zero +; CHECK-NEXT: vncvt.x.x.w v8, v16 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1096,7 +1096,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v16, zero +; CHECK-NEXT: vncvt.x.x.w v8, v16 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1174,9 +1174,9 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v9, zero +; CHECK-NEXT: vncvt.x.x.w v8, v9 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1188,9 +1188,9 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v9, zero +; CHECK-NEXT: vncvt.x.x.w v8, v9 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1202,7 +1202,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v9, zero +; CHECK-NEXT: vncvt.x.x.w v8, v9 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1214,7 +1214,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: 
vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v9, zero +; CHECK-NEXT: vncvt.x.x.w v8, v9 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1292,9 +1292,9 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v10, zero +; CHECK-NEXT: vncvt.x.x.w v8, v10 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1306,9 +1306,9 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v10, zero +; CHECK-NEXT: vncvt.x.x.w v8, v10 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1320,7 +1320,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v10, zero +; CHECK-NEXT: vncvt.x.x.w v8, v10 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1332,7 +1332,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v10, zero +; CHECK-NEXT: vncvt.x.x.w v8, v10 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1410,9 +1410,9 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v12, zero +; CHECK-NEXT: vncvt.x.x.w v8, v12 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1424,9 +1424,9 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v12, zero +; CHECK-NEXT: vncvt.x.x.w v8, v12 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1438,7 +1438,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v12, zero +; CHECK-NEXT: vncvt.x.x.w v8, v12 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1450,7 +1450,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v12, zero +; CHECK-NEXT: vncvt.x.x.w v8, v12 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1528,9 +1528,9 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v10, v16, zero +; CHECK-NEXT: vncvt.x.x.w v10, v16 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v10, zero +; CHECK-NEXT: vncvt.x.x.w v8, v10 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1542,9 +1542,9 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v10, v16, zero +; 
CHECK-NEXT: vncvt.x.x.w v10, v16 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v10, zero +; CHECK-NEXT: vncvt.x.x.w v8, v10 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1556,7 +1556,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v16, zero +; CHECK-NEXT: vncvt.x.x.w v8, v16 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1568,7 +1568,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v16, zero +; CHECK-NEXT: vncvt.x.x.w v8, v16 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll @@ -1056,7 +1056,7 @@ ; RV32-NEXT: vsext.vf8 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1082,7 +1082,7 @@ ; RV32-NEXT: vzext.vf8 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1131,7 +1131,7 @@ ; RV32-NEXT: vsext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1157,7 +1157,7 @@ ; RV32-NEXT: vzext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1205,7 +1205,7 @@ ; RV32-NEXT: vsext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1231,7 +1231,7 @@ ; RV32-NEXT: vzext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1256,7 +1256,7 @@ ; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1851,7 +1851,7 @@ ; RV32-NEXT: vsext.vf8 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1877,7 +1877,7 @@ ; RV32-NEXT: vzext.vf8 v16, 
v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1926,7 +1926,7 @@ ; RV32-NEXT: vsext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -1952,7 +1952,7 @@ ; RV32-NEXT: vzext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2000,7 +2000,7 @@ ; RV32-NEXT: vsext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2026,7 +2026,7 @@ ; RV32-NEXT: vzext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2051,7 +2051,7 @@ ; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2117,7 +2117,7 @@ ; RV32-NEXT: vsext.vf8 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2143,7 +2143,7 @@ ; RV32-NEXT: vzext.vf8 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2192,7 +2192,7 @@ ; RV32-NEXT: vsext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2218,7 +2218,7 @@ ; RV32-NEXT: vzext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2266,7 +2266,7 @@ ; RV32-NEXT: vsext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2292,7 +2292,7 @@ ; RV32-NEXT: vzext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: 
vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2317,7 +2317,7 @@ ; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret @@ -2466,7 +2466,7 @@ ; RV32-NEXT: vsext.vf4 v24, v10 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: srli a3, a2, 3 @@ -2480,7 +2480,7 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a4, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: ret @@ -2536,7 +2536,7 @@ ; RV32-NEXT: vzext.vf4 v24, v10 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: srli a3, a2, 3 @@ -2550,7 +2550,7 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a4, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t ; RV32-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll @@ -44,7 +44,7 @@ ; RV32-LABEL: vpscatter_nxv2i16_truncstore_nxv2i8: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, mu -; RV32-NEXT: vnsrl.wx v8, v8, zero +; RV32-NEXT: vncvt.x.x.w v8, v8 ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret @@ -52,7 +52,7 @@ ; RV64-LABEL: vpscatter_nxv2i16_truncstore_nxv2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, mu -; RV64-NEXT: vnsrl.wx v8, v8, zero +; RV64-NEXT: vncvt.x.x.w v8, v8 ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret @@ -65,9 +65,9 @@ ; RV32-LABEL: vpscatter_nxv2i32_truncstore_nxv2i8: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu -; RV32-NEXT: vnsrl.wx v8, v8, zero +; RV32-NEXT: vncvt.x.x.w v8, v8 ; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; RV32-NEXT: vnsrl.wx v8, v8, zero +; RV32-NEXT: vncvt.x.x.w v8, v8 ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret @@ -75,9 +75,9 @@ ; RV64-LABEL: vpscatter_nxv2i32_truncstore_nxv2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu -; RV64-NEXT: vnsrl.wx v8, v8, zero +; RV64-NEXT: vncvt.x.x.w v8, v8 ; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; RV64-NEXT: vnsrl.wx v8, v8, zero +; RV64-NEXT: vncvt.x.x.w v8, v8 ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret @@ -90,11 +90,11 @@ ; RV32-LABEL: 
vpscatter_nxv2i64_truncstore_nxv2i8: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; RV32-NEXT: vnsrl.wx v11, v8, zero +; RV32-NEXT: vncvt.x.x.w v11, v8 ; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; RV32-NEXT: vnsrl.wx v8, v11, zero +; RV32-NEXT: vncvt.x.x.w v8, v11 ; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; RV32-NEXT: vnsrl.wx v8, v8, zero +; RV32-NEXT: vncvt.x.x.w v8, v8 ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret @@ -102,11 +102,11 @@ ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; RV64-NEXT: vnsrl.wx v12, v8, zero +; RV64-NEXT: vncvt.x.x.w v12, v8 ; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; RV64-NEXT: vnsrl.wx v8, v12, zero +; RV64-NEXT: vncvt.x.x.w v8, v12 ; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; RV64-NEXT: vnsrl.wx v8, v8, zero +; RV64-NEXT: vncvt.x.x.w v8, v8 ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret @@ -230,7 +230,7 @@ ; RV32-LABEL: vpscatter_nxv2i32_truncstore_nxv2i16: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu -; RV32-NEXT: vnsrl.wx v8, v8, zero +; RV32-NEXT: vncvt.x.x.w v8, v8 ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret @@ -238,7 +238,7 @@ ; RV64-LABEL: vpscatter_nxv2i32_truncstore_nxv2i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu -; RV64-NEXT: vnsrl.wx v8, v8, zero +; RV64-NEXT: vncvt.x.x.w v8, v8 ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret @@ -251,9 +251,9 @@ ; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i16: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; RV32-NEXT: vnsrl.wx v11, v8, zero +; RV32-NEXT: vncvt.x.x.w v11, v8 ; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; RV32-NEXT: vnsrl.wx v8, v11, zero +; RV32-NEXT: vncvt.x.x.w v8, v11 ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret @@ -261,9 +261,9 @@ ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; RV64-NEXT: vnsrl.wx v12, v8, zero +; RV64-NEXT: vncvt.x.x.w v12, v8 ; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; RV64-NEXT: vnsrl.wx v8, v12, zero +; RV64-NEXT: vncvt.x.x.w v8, v12 ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret @@ -460,7 +460,7 @@ ; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i32: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; RV32-NEXT: vnsrl.wx v11, v8, zero +; RV32-NEXT: vncvt.x.x.w v11, v8 ; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV32-NEXT: vsoxei32.v v11, (zero), v10, v0.t ; RV32-NEXT: ret @@ -468,7 +468,7 @@ ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu -; RV64-NEXT: vnsrl.wx v12, v8, zero +; RV64-NEXT: vncvt.x.x.w v12, v8 ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vsoxei64.v v12, (zero), v10, v0.t ; RV64-NEXT: ret @@ -815,7 +815,7 @@ ; RV32-NEXT: vsext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -841,7 
+841,7 @@ ; RV32-NEXT: vzext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -890,7 +890,7 @@ ; RV32-NEXT: vsext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -916,7 +916,7 @@ ; RV32-NEXT: vzext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -964,7 +964,7 @@ ; RV32-NEXT: vsext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -990,7 +990,7 @@ ; RV32-NEXT: vzext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1015,7 +1015,7 @@ ; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1590,7 +1590,7 @@ ; RV32-NEXT: vsext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1616,7 +1616,7 @@ ; RV32-NEXT: vzext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1665,7 +1665,7 @@ ; RV32-NEXT: vsext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1691,7 +1691,7 @@ ; RV32-NEXT: vzext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1739,7 +1739,7 @@ ; RV32-NEXT: vsext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1765,7 +1765,7 @@ ; RV32-NEXT: vzext.vf2 v24, v16 ; RV32-NEXT: 
vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1790,7 +1790,7 @@ ; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1855,7 +1855,7 @@ ; RV32-NEXT: vsext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1881,7 +1881,7 @@ ; RV32-NEXT: vzext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1930,7 +1930,7 @@ ; RV32-NEXT: vsext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -1956,7 +1956,7 @@ ; RV32-NEXT: vzext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -2004,7 +2004,7 @@ ; RV32-NEXT: vsext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -2030,7 +2030,7 @@ ; RV32-NEXT: vzext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -2055,7 +2055,7 @@ ; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v16, zero +; RV32-NEXT: vncvt.x.x.w v24, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: ret @@ -2232,7 +2232,7 @@ ; RV32-NEXT: vsext.vf4 v16, v26 ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: addi a3, sp, 16 ; RV32-NEXT: vl8re8.v v8, (a3) # Unknown-size Folded Reload @@ -2248,7 +2248,7 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a4, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 3 @@ -2348,7 +2348,7 @@ ; RV32-NEXT: vzext.vf4 v16, v26 ; RV32-NEXT: 
vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v24, v8, zero +; RV32-NEXT: vncvt.x.x.w v24, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: addi a3, sp, 16 ; RV32-NEXT: vl8re8.v v8, (a3) # Unknown-size Folded Reload @@ -2364,7 +2364,7 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vsetvli zero, a4, e32, m4, ta, mu -; RV32-NEXT: vnsrl.wx v16, v8, zero +; RV32-NEXT: vncvt.x.x.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll @@ -507,7 +507,7 @@ ; CHECK-NEXT: # %bb.1: # %if ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v10, (a1) -; CHECK-NEXT: vwadd.vx v8, v10, zero +; CHECK-NEXT: vwcvt.x.x.v v8, v10 ; CHECK-NEXT: .LBB9_2: # %if.end ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll @@ -6,7 +6,7 @@ ; CHECK-LABEL: vtrunc_nxv1i16_nxv1i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -16,7 +16,7 @@ ; CHECK-LABEL: vtrunc_nxv2i16_nxv2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -26,7 +26,7 @@ ; CHECK-LABEL: vtrunc_nxv4i16_nxv4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -36,7 +36,7 @@ ; CHECK-LABEL: vtrunc_nxv8i16_nxv8i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v10, v8, zero +; CHECK-NEXT: vncvt.x.x.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -47,7 +47,7 @@ ; CHECK-LABEL: vtrunc_nxv16i16_nxv16i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v12, v8, zero +; CHECK-NEXT: vncvt.x.x.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -58,9 +58,9 @@ ; CHECK-LABEL: vtrunc_nxv1i32_nxv1i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -70,7 +70,7 @@ ; CHECK-LABEL: vtrunc_nxv1i32_nxv1i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -80,9 +80,9 @@ ; CHECK-LABEL: vtrunc_nxv2i32_nxv2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; 
CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -92,7 +92,7 @@ ; CHECK-LABEL: vtrunc_nxv2i32_nxv2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -102,9 +102,9 @@ ; CHECK-LABEL: vtrunc_nxv4i32_nxv4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v10, v8, zero +; CHECK-NEXT: vncvt.x.x.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v10, zero +; CHECK-NEXT: vncvt.x.x.w v8, v10 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -114,7 +114,7 @@ ; CHECK-LABEL: vtrunc_nxv4i32_nxv4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v10, v8, zero +; CHECK-NEXT: vncvt.x.x.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -125,9 +125,9 @@ ; CHECK-LABEL: vtrunc_nxv8i32_nxv8i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v12, v8, zero +; CHECK-NEXT: vncvt.x.x.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v12, zero +; CHECK-NEXT: vncvt.x.x.w v8, v12 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -137,7 +137,7 @@ ; CHECK-LABEL: vtrunc_nxv8i32_nxv8i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v12, v8, zero +; CHECK-NEXT: vncvt.x.x.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -148,9 +148,9 @@ ; CHECK-LABEL: vtrunc_nxv16i32_nxv16i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu -; CHECK-NEXT: vnsrl.wx v16, v8, zero +; CHECK-NEXT: vncvt.x.x.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v16, zero +; CHECK-NEXT: vncvt.x.x.w v8, v16 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -160,7 +160,7 @@ ; CHECK-LABEL: vtrunc_nxv16i32_nxv16i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu -; CHECK-NEXT: vnsrl.wx v16, v8, zero +; CHECK-NEXT: vncvt.x.x.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -171,11 +171,11 @@ ; CHECK-LABEL: vtrunc_nxv1i64_nxv1i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -185,9 +185,9 @@ ; CHECK-LABEL: vtrunc_nxv1i64_nxv1i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -197,7 +197,7 @@ ; CHECK-LABEL: vtrunc_nxv1i64_nxv1i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -207,11 +207,11 @@ ; CHECK-LABEL: vtrunc_nxv2i64_nxv2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v10, v8, zero +; CHECK-NEXT: vncvt.x.x.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu 
-; CHECK-NEXT: vnsrl.wx v8, v10, zero +; CHECK-NEXT: vncvt.x.x.w v8, v10 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -221,9 +221,9 @@ ; CHECK-LABEL: vtrunc_nxv2i64_nxv2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v10, v8, zero +; CHECK-NEXT: vncvt.x.x.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v10, zero +; CHECK-NEXT: vncvt.x.x.w v8, v10 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -233,7 +233,7 @@ ; CHECK-LABEL: vtrunc_nxv2i64_nxv2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v10, v8, zero +; CHECK-NEXT: vncvt.x.x.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -244,11 +244,11 @@ ; CHECK-LABEL: vtrunc_nxv4i64_nxv4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v12, v8, zero +; CHECK-NEXT: vncvt.x.x.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v12, zero +; CHECK-NEXT: vncvt.x.x.w v8, v12 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v8, zero +; CHECK-NEXT: vncvt.x.x.w v8, v8 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -258,9 +258,9 @@ ; CHECK-LABEL: vtrunc_nxv4i64_nxv4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v12, v8, zero +; CHECK-NEXT: vncvt.x.x.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v12, zero +; CHECK-NEXT: vncvt.x.x.w v8, v12 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -270,7 +270,7 @@ ; CHECK-LABEL: vtrunc_nxv4i64_nxv4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v12, v8, zero +; CHECK-NEXT: vncvt.x.x.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -281,11 +281,11 @@ ; CHECK-LABEL: vtrunc_nxv8i64_nxv8i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu -; CHECK-NEXT: vnsrl.wx v16, v8, zero +; CHECK-NEXT: vncvt.x.x.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v10, v16, zero +; CHECK-NEXT: vncvt.x.x.w v10, v16 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v10, zero +; CHECK-NEXT: vncvt.x.x.w v8, v10 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -295,9 +295,9 @@ ; CHECK-LABEL: vtrunc_nxv8i64_nxv8i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu -; CHECK-NEXT: vnsrl.wx v16, v8, zero +; CHECK-NEXT: vncvt.x.x.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu -; CHECK-NEXT: vnsrl.wx v8, v16, zero +; CHECK-NEXT: vncvt.x.x.w v8, v16 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -307,7 +307,7 @@ ; CHECK-LABEL: vtrunc_nxv8i64_nxv8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu -; CHECK-NEXT: vnsrl.wx v16, v8, zero +; CHECK-NEXT: vncvt.x.x.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %tvec = trunc %va to diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll @@ -28,7 +28,7 @@ ; CHECK-LABEL: vxor_vi_nxv1i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = 
insertelement poison, i8 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -87,7 +87,7 @@ ; CHECK-LABEL: vxor_vi_nxv2i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i8 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -146,7 +146,7 @@ ; CHECK-LABEL: vxor_vi_nxv4i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i8 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -205,7 +205,7 @@ ; CHECK-LABEL: vxor_vi_nxv8i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i8 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -264,7 +264,7 @@ ; CHECK-LABEL: vxor_vi_nxv16i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i8 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -323,7 +323,7 @@ ; CHECK-LABEL: vxor_vi_nxv32i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i8 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -382,7 +382,7 @@ ; CHECK-LABEL: vxor_vi_nxv64i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i8 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -441,7 +441,7 @@ ; CHECK-LABEL: vxor_vi_nxv1i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i16 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -500,7 +500,7 @@ ; CHECK-LABEL: vxor_vi_nxv2i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i16 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -559,7 +559,7 @@ ; CHECK-LABEL: vxor_vi_nxv4i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i16 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -618,7 +618,7 @@ ; CHECK-LABEL: vxor_vi_nxv8i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i16 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -677,7 +677,7 @@ ; CHECK-LABEL: vxor_vi_nxv16i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i16 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -736,7 +736,7 @@ ; CHECK-LABEL: vxor_vi_nxv32i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; 
CHECK-NEXT: ret %head = insertelement poison, i16 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -795,7 +795,7 @@ ; CHECK-LABEL: vxor_vi_nxv1i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i32 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -854,7 +854,7 @@ ; CHECK-LABEL: vxor_vi_nxv2i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i32 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -913,7 +913,7 @@ ; CHECK-LABEL: vxor_vi_nxv4i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i32 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -972,7 +972,7 @@ ; CHECK-LABEL: vxor_vi_nxv8i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i32 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -1031,7 +1031,7 @@ ; CHECK-LABEL: vxor_vi_nxv16i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i32 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -1103,7 +1103,7 @@ ; CHECK-LABEL: vxor_vi_nxv1i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i64 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -1175,7 +1175,7 @@ ; CHECK-LABEL: vxor_vi_nxv2i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i64 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -1247,7 +1247,7 @@ ; CHECK-LABEL: vxor_vi_nxv4i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i64 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer @@ -1319,7 +1319,7 @@ ; CHECK-LABEL: vxor_vi_nxv8i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i64 -1, i32 0 %splat = shufflevector %head, poison, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll @@ -110,7 +110,7 @@ ; CHECK-LABEL: vxor_vi_nxv1i8_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -212,7 +212,7 @@ ; CHECK-LABEL: vxor_vi_nxv2i8_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, 
i8 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -314,7 +314,7 @@ ; CHECK-LABEL: vxor_vi_nxv4i8_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -416,7 +416,7 @@ ; CHECK-LABEL: vxor_vi_nxv8i8_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -518,7 +518,7 @@ ; CHECK-LABEL: vxor_vi_nxv15i8_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -620,7 +620,7 @@ ; CHECK-LABEL: vxor_vi_nxv16i8_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -722,7 +722,7 @@ ; CHECK-LABEL: vxor_vi_nxv32i8_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -824,7 +824,7 @@ ; CHECK-LABEL: vxor_vi_nxv64i8_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -938,7 +938,7 @@ ; CHECK-LABEL: vxor_vi_nxv1i16_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1040,7 +1040,7 @@ ; CHECK-LABEL: vxor_vi_nxv2i16_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1142,7 +1142,7 @@ ; CHECK-LABEL: vxor_vi_nxv4i16_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1244,7 +1244,7 @@ ; CHECK-LABEL: vxor_vi_nxv8i16_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1346,7 +1346,7 @@ ; CHECK-LABEL: vxor_vi_nxv16i16_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1448,7 +1448,7 @@ ; CHECK-LABEL: 
vxor_vi_nxv32i16_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1550,7 +1550,7 @@ ; CHECK-LABEL: vxor_vi_nxv1i32_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1652,7 +1652,7 @@ ; CHECK-LABEL: vxor_vi_nxv2i32_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1754,7 +1754,7 @@ ; CHECK-LABEL: vxor_vi_nxv4i32_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1856,7 +1856,7 @@ ; CHECK-LABEL: vxor_vi_nxv8i32_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -1958,7 +1958,7 @@ ; CHECK-LABEL: vxor_vi_nxv16i32_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -2088,7 +2088,7 @@ ; CHECK-LABEL: vxor_vi_nxv1i64_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -2218,7 +2218,7 @@ ; CHECK-LABEL: vxor_vi_nxv2i64_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -2348,7 +2348,7 @@ ; CHECK-LABEL: vxor_vi_nxv4i64_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer @@ -2478,7 +2478,7 @@ ; CHECK-LABEL: vxor_vi_nxv8i64_unmasked_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 -1, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer diff --git a/llvm/test/MC/RISCV/rvv/add.s b/llvm/test/MC/RISCV/rvv/add.s --- a/llvm/test/MC/RISCV/rvv/add.s +++ b/llvm/test/MC/RISCV/rvv/add.s @@ -345,7 +345,7 @@ # CHECK-UNKNOWN: 57 64 40 c4 vwcvt.x.x.v v8, v4 -# CHECK-INST: vwadd.vx v8, v4, zero +# CHECK-INST: vwcvt.x.x.v v8, v4 # CHECK-ENCODING: [0x57,0x64,0x40,0xc6] # CHECK-ERROR: instruction requires the following: 'V' (Vector Extension for Application Processors), 'Zve32x' or 
'Zve64x' (Vector Extensions for Embedded Processors)
# CHECK-UNKNOWN: 57 64 40 c6

@@ -357,7 +357,7 @@
# CHECK-UNKNOWN: 57 64 40 c0

vwcvtu.x.x.v v8, v4
-# CHECK-INST: vwaddu.vx v8, v4, zero
+# CHECK-INST: vwcvtu.x.x.v v8, v4
# CHECK-ENCODING: [0x57,0x64,0x40,0xc2]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Extension for Application Processors), 'Zve32x' or 'Zve64x' (Vector Extensions for Embedded Processors)
# CHECK-UNKNOWN: 57 64 40 c2

diff --git a/llvm/test/MC/RISCV/rvv/xor.s b/llvm/test/MC/RISCV/rvv/xor.s
--- a/llvm/test/MC/RISCV/rvv/xor.s
+++ b/llvm/test/MC/RISCV/rvv/xor.s
@@ -51,7 +51,7 @@
# CHECK-UNKNOWN: 57 b4 4f 2c

vnot.v v8, v4
-# CHECK-INST: vxor.vi v8, v4, -1
+# CHECK-INST: vnot.v v8, v4
# CHECK-ENCODING: [0x57,0xb4,0x4f,0x2e]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Extension for Application Processors), 'Zve32x' or 'Zve64x' (Vector Extensions for Embedded Processors)
# CHECK-UNKNOWN: 57 b4 4f 2e

diff --git a/llvm/utils/TableGen/AsmWriterEmitter.cpp b/llvm/utils/TableGen/AsmWriterEmitter.cpp
--- a/llvm/utils/TableGen/AsmWriterEmitter.cpp
+++ b/llvm/utils/TableGen/AsmWriterEmitter.cpp
@@ -867,8 +867,6 @@
    IAPrinter IAP(CGA.Result->getAsString(), FlatAliasAsmString, NumMIOps);

-    bool CantHandle = false;
-
    unsigned MIOpNum = 0;
    for (unsigned i = 0, e = LastOpNo; i != e; ++i) {
      // Skip over tied operands as they're not part of an alias declaration.
@@ -968,10 +966,9 @@
        break;
      }
      case CodeGenInstAlias::ResultOperand::K_Reg:
-        // If this is zero_reg, something's playing tricks we're not
-        // equipped to handle.
        if (!CGA.ResultOperands[i].getRegister()) {
-          CantHandle = true;
+          IAP.addCond(std::string(formatv(
+              "AliasPatternCond::K_Reg, {0}::NoRegister", Namespace)));
          break;
        }
@@ -984,8 +981,6 @@
      MIOpNum += RO.getMINumOperands();
    }

-    if (CantHandle) continue;
-
    std::vector<Record *> ReqFeatures;
    if (PassSubtarget) {
      // We only consider ReqFeatures predicates if PassSubtarget
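
Note on the AsmWriterEmitter.cpp change above: the alias printer used to skip any InstAlias whose result pattern contained a zero_reg operand (the CantHandle path), so such aliases could be parsed but never printed. The patch instead records an AliasPatternCond::K_Reg condition against NoRegister, which the generated printAliasInstr checks against the corresponding MCInst operand holding the invalid (zero) register. This is what lets the CHECK lines in the RISC-V tests above flip from the underlying instructions to their unmasked alias forms, e.g. vxor.vi v8, v8, -1 now prints as vnot.v v8, v8 and vnsrl.wx v8, v8, zero prints as vncvt.x.x.w v8, v8. A minimal sketch of the kind of definition this covers; FOOrr, its GPR operands, and the foo mnemonic are hypothetical, for illustration only:

  // Hypothetical InstAlias that pins a trailing register slot to zero_reg.
  // Before this patch AsmWriterEmitter bailed out on it; now the emitted
  // printer selects it whenever the MCInst's third operand is NoRegister.
  def : InstAlias<"foo $Rd, $Rn", (FOOrr GPR:$Rd, GPR:$Rn, zero_reg)>;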